Merge pull request #2491 from dnephin/bump-1.5.2

WIP: Bump 1.5.2
This commit is contained in:
Daniel Nephin 2015-12-03 17:11:51 -08:00
commit 8f48fa4747
50 changed files with 1373 additions and 743 deletions

1
.gitignore vendored
View File

@ -8,3 +8,4 @@
/docs/_site /docs/_site
/venv /venv
README.rst README.rst
compose/GITSHA

View File

@ -2,16 +2,14 @@ sudo: required
language: python language: python
services:
- docker
matrix: matrix:
include: include:
- os: linux - os: linux
services:
- docker
- os: osx - os: osx
language: generic language: generic
install: ./script/travis/install install: ./script/travis/install
script: script:

View File

@ -1,6 +1,28 @@
Change log Change log
========== ==========
1.5.2 (2015-12-03)
------------------
- Fixed a bug which broke the use of `environment` and `env_file` with
`extends`, and caused environment keys without values to have a `None`
value, instead of a value from the host environment.
- Fixed a regression in 1.5.1 that caused a warning about volumes to be
raised incorrectly when containers were recreated.
- Fixed a bug which prevented building a `Dockerfile` that used `ADD <url>`
- Fixed a bug with `docker-compose restart` which prevented it from
starting stopped containers.
- Fixed handling of SIGTERM and SIGINT to properly stop containers
- Added support for using a url as the value of `build`
- Improved the validation of the `expose` option
1.5.1 (2015-11-12) 1.5.1 (2015-11-12)
------------------ ------------------

View File

@ -8,6 +8,6 @@ COPY requirements.txt /code/requirements.txt
RUN pip install -r /code/requirements.txt RUN pip install -r /code/requirements.txt
ADD dist/docker-compose-release.tar.gz /code/docker-compose ADD dist/docker-compose-release.tar.gz /code/docker-compose
RUN pip install /code/docker-compose/docker-compose-* RUN pip install --no-deps /code/docker-compose/docker-compose-*
ENTRYPOINT ["/usr/bin/docker-compose"] ENTRYPOINT ["/usr/bin/docker-compose"]

View File

@ -7,6 +7,7 @@ include *.md
exclude README.md exclude README.md
include README.rst include README.rst
include compose/config/*.json include compose/config/*.json
include compose/GITSHA
recursive-include contrib/completion * recursive-include contrib/completion *
recursive-include tests * recursive-include tests *
global-exclude *.pyc global-exclude *.pyc

View File

@ -10,7 +10,7 @@ see [the list of features](docs/index.md#features).
Compose is great for development, testing, and staging environments, as well as Compose is great for development, testing, and staging environments, as well as
CI workflows. You can learn more about each case in CI workflows. You can learn more about each case in
[Common Use Cases](#common-use-cases). [Common Use Cases](docs/index.md#common-use-cases).
Using Compose is basically a three-step process. Using Compose is basically a three-step process.

View File

@ -1,3 +1,3 @@
from __future__ import unicode_literals from __future__ import unicode_literals
__version__ = '1.5.1' __version__ = '1.5.2'

View File

@ -12,12 +12,11 @@ from requests.exceptions import SSLError
from . import errors from . import errors
from . import verbose_proxy from . import verbose_proxy
from .. import __version__
from .. import config from .. import config
from ..project import Project from ..project import Project
from ..service import ConfigError
from .docker_client import docker_client from .docker_client import docker_client
from .utils import call_silently from .utils import call_silently
from .utils import get_version_info
from .utils import is_mac from .utils import is_mac
from .utils import is_ubuntu from .utils import is_ubuntu
@ -71,7 +70,7 @@ def get_client(verbose=False, version=None):
client = docker_client(version=version) client = docker_client(version=version)
if verbose: if verbose:
version_info = six.iteritems(client.version()) version_info = six.iteritems(client.version())
log.info("Compose version %s", __version__) log.info(get_version_info('full'))
log.info("Docker base_url: %s", client.base_url) log.info("Docker base_url: %s", client.base_url)
log.info("Docker version: %s", log.info("Docker version: %s",
", ".join("%s=%s" % item for item in version_info)) ", ".join("%s=%s" % item for item in version_info))
@ -84,16 +83,12 @@ def get_project(base_dir, config_path=None, project_name=None, verbose=False,
config_details = config.find(base_dir, config_path) config_details = config.find(base_dir, config_path)
api_version = '1.21' if use_networking else None api_version = '1.21' if use_networking else None
try: return Project.from_dicts(
return Project.from_dicts( get_project_name(config_details.working_dir, project_name),
get_project_name(config_details.working_dir, project_name), config.load(config_details),
config.load(config_details), get_client(verbose=verbose, version=api_version),
get_client(verbose=verbose, version=api_version), use_networking=use_networking,
use_networking=use_networking, network_driver=network_driver)
network_driver=network_driver,
)
except ConfigError as e:
raise errors.UserError(six.text_type(e))
def get_project_name(working_dir, project_name=None): def get_project_name(working_dir, project_name=None):

View File

@ -368,7 +368,6 @@ class TopLevelCommand(DocoptCommand):
allocates a TTY. allocates a TTY.
""" """
service = project.get_service(options['SERVICE']) service = project.get_service(options['SERVICE'])
detach = options['-d'] detach = options['-d']
if IS_WINDOWS_PLATFORM and not detach: if IS_WINDOWS_PLATFORM and not detach:
@ -380,22 +379,6 @@ class TopLevelCommand(DocoptCommand):
if options['--allow-insecure-ssl']: if options['--allow-insecure-ssl']:
log.warn(INSECURE_SSL_WARNING) log.warn(INSECURE_SSL_WARNING)
if not options['--no-deps']:
deps = service.get_linked_service_names()
if len(deps) > 0:
project.up(
service_names=deps,
start_deps=True,
strategy=ConvergenceStrategy.never,
)
elif project.use_networking:
project.ensure_network_exists()
tty = True
if detach or options['-T'] or not sys.stdin.isatty():
tty = False
if options['COMMAND']: if options['COMMAND']:
command = [options['COMMAND']] + options['ARGS'] command = [options['COMMAND']] + options['ARGS']
else: else:
@ -403,7 +386,7 @@ class TopLevelCommand(DocoptCommand):
container_options = { container_options = {
'command': command, 'command': command,
'tty': tty, 'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
'stdin_open': not detach, 'stdin_open': not detach,
'detach': detach, 'detach': detach,
} }
@ -435,31 +418,7 @@ class TopLevelCommand(DocoptCommand):
if options['--name']: if options['--name']:
container_options['name'] = options['--name'] container_options['name'] = options['--name']
try: run_one_off_container(container_options, project, service, options)
container = service.create_container(
quiet=True,
one_off=True,
**container_options
)
except APIError as e:
legacy.check_for_legacy_containers(
project.client,
project.name,
[service.name],
allow_one_off=False,
)
raise e
if detach:
container.start()
print(container.name)
else:
dockerpty.start(project.client, container.id, interactive=not options['-T'])
exit_code = container.wait()
if options['--rm']:
project.client.remove_container(container.id)
sys.exit(exit_code)
def scale(self, project, options): def scale(self, project, options):
""" """
@ -647,6 +606,58 @@ def convergence_strategy_from_opts(options):
return ConvergenceStrategy.changed return ConvergenceStrategy.changed
def run_one_off_container(container_options, project, service, options):
if not options['--no-deps']:
deps = service.get_linked_service_names()
if deps:
project.up(
service_names=deps,
start_deps=True,
strategy=ConvergenceStrategy.never)
if project.use_networking:
project.ensure_network_exists()
try:
container = service.create_container(
quiet=True,
one_off=True,
**container_options)
except APIError:
legacy.check_for_legacy_containers(
project.client,
project.name,
[service.name],
allow_one_off=False)
raise
if options['-d']:
container.start()
print(container.name)
return
def remove_container(force=False):
if options['--rm']:
project.client.remove_container(container.id, force=True)
def force_shutdown(signal, frame):
project.client.kill(container.id)
remove_container(force=True)
sys.exit(2)
def shutdown(signal, frame):
set_signal_handler(force_shutdown)
project.client.stop(container.id)
remove_container()
sys.exit(1)
set_signal_handler(shutdown)
dockerpty.start(project.client, container.id, interactive=not options['-T'])
exit_code = container.wait()
remove_container()
sys.exit(exit_code)
def build_log_printer(containers, service_names, monochrome): def build_log_printer(containers, service_names, monochrome):
if service_names: if service_names:
containers = [ containers = [
@ -657,18 +668,25 @@ def build_log_printer(containers, service_names, monochrome):
def attach_to_logs(project, log_printer, service_names, timeout): def attach_to_logs(project, log_printer, service_names, timeout):
print("Attaching to", list_containers(log_printer.containers))
try:
log_printer.run()
finally:
def handler(signal, frame):
project.kill(service_names=service_names)
sys.exit(0)
signal.signal(signal.SIGINT, handler)
def force_shutdown(signal, frame):
project.kill(service_names=service_names)
sys.exit(2)
def shutdown(signal, frame):
set_signal_handler(force_shutdown)
print("Gracefully stopping... (press Ctrl+C again to force)") print("Gracefully stopping... (press Ctrl+C again to force)")
project.stop(service_names=service_names, timeout=timeout) project.stop(service_names=service_names, timeout=timeout)
print("Attaching to", list_containers(log_printer.containers))
set_signal_handler(shutdown)
log_printer.run()
def set_signal_handler(handler):
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
def list_containers(containers): def list_containers(containers):
return ", ".join(c.name for c in containers) return ", ".join(c.name for c in containers)

View File

@ -7,10 +7,10 @@ import platform
import ssl import ssl
import subprocess import subprocess
from docker import version as docker_py_version import docker
from six.moves import input from six.moves import input
from .. import __version__ import compose
def yesno(prompt, default=None): def yesno(prompt, default=None):
@ -57,13 +57,32 @@ def is_ubuntu():
def get_version_info(scope): def get_version_info(scope):
versioninfo = 'docker-compose version: %s' % __version__ versioninfo = 'docker-compose version {}, build {}'.format(
compose.__version__,
get_build_version())
if scope == 'compose': if scope == 'compose':
return versioninfo return versioninfo
elif scope == 'full': if scope == 'full':
return versioninfo + '\n' \ return (
+ "docker-py version: %s\n" % docker_py_version \ "{}\n"
+ "%s version: %s\n" % (platform.python_implementation(), platform.python_version()) \ "docker-py version: {}\n"
+ "OpenSSL version: %s" % ssl.OPENSSL_VERSION "{} version: {}\n"
else: "OpenSSL version: {}"
raise RuntimeError('passed unallowed value to `cli.utils.get_version_info`') ).format(
versioninfo,
docker.version,
platform.python_implementation(),
platform.python_version(),
ssl.OPENSSL_VERSION)
raise ValueError("{} is not a valid version scope".format(scope))
def get_build_version():
filename = os.path.join(os.path.dirname(compose.__file__), 'GITSHA')
if not os.path.exists(filename):
return 'unknown'
with open(filename) as fh:
return fh.read().strip()

View File

@ -2,7 +2,6 @@
from .config import ConfigurationError from .config import ConfigurationError
from .config import DOCKER_CONFIG_KEYS from .config import DOCKER_CONFIG_KEYS
from .config import find from .config import find
from .config import get_service_name_from_net
from .config import load from .config import load
from .config import merge_environment from .config import merge_environment
from .config import parse_environment from .config import parse_environment

View File

@ -1,3 +1,5 @@
from __future__ import absolute_import
import codecs import codecs
import logging import logging
import os import os
@ -11,6 +13,12 @@ from .errors import CircularReference
from .errors import ComposeFileNotFound from .errors import ComposeFileNotFound
from .errors import ConfigurationError from .errors import ConfigurationError
from .interpolation import interpolate_environment_variables from .interpolation import interpolate_environment_variables
from .sort_services import get_service_name_from_net
from .sort_services import sort_service_dicts
from .types import parse_extra_hosts
from .types import parse_restart_spec
from .types import VolumeFromSpec
from .types import VolumeSpec
from .validation import validate_against_fields_schema from .validation import validate_against_fields_schema
from .validation import validate_against_service_schema from .validation import validate_against_service_schema
from .validation import validate_extends_file_path from .validation import validate_extends_file_path
@ -67,6 +75,13 @@ ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
'external_links', 'external_links',
] ]
DOCKER_VALID_URL_PREFIXES = (
'http://',
'https://',
'git://',
'github.com/',
'git@',
)
SUPPORTED_FILENAMES = [ SUPPORTED_FILENAMES = [
'docker-compose.yml', 'docker-compose.yml',
@ -197,16 +212,20 @@ def load(config_details):
service_dict) service_dict)
resolver = ServiceExtendsResolver(service_config) resolver = ServiceExtendsResolver(service_config)
service_dict = process_service(resolver.run()) service_dict = process_service(resolver.run())
# TODO: move to validate_service()
validate_against_service_schema(service_dict, service_config.name) validate_against_service_schema(service_dict, service_config.name)
validate_paths(service_dict) validate_paths(service_dict)
service_dict = finalize_service(service_config._replace(config=service_dict))
service_dict['name'] = service_config.name service_dict['name'] = service_config.name
return service_dict return service_dict
def build_services(config_file): def build_services(config_file):
return [ return sort_service_dicts([
build_service(config_file.filename, name, service_dict) build_service(config_file.filename, name, service_dict)
for name, service_dict in config_file.config.items() for name, service_dict in config_file.config.items()
] ])
def merge_services(base, override): def merge_services(base, override):
all_service_names = set(base) | set(override) all_service_names = set(base) | set(override)
@ -257,16 +276,11 @@ class ServiceExtendsResolver(object):
def run(self): def run(self):
self.detect_cycle() self.detect_cycle()
service_dict = dict(self.service_config.config) if 'extends' in self.service_config.config:
env = resolve_environment(self.working_dir, self.service_config.config)
if env:
service_dict['environment'] = env
service_dict.pop('env_file', None)
if 'extends' in service_dict:
service_dict = self.resolve_extends(*self.validate_and_construct_extends()) service_dict = self.resolve_extends(*self.validate_and_construct_extends())
return self.service_config._replace(config=service_dict)
return self.service_config._replace(config=service_dict) return self.service_config
def validate_and_construct_extends(self): def validate_and_construct_extends(self):
extends = self.service_config.config['extends'] extends = self.service_config.config['extends']
@ -316,17 +330,13 @@ class ServiceExtendsResolver(object):
return filename return filename
def resolve_environment(working_dir, service_dict): def resolve_environment(service_dict):
"""Unpack any environment variables from an env_file, if set. """Unpack any environment variables from an env_file, if set.
Interpolate environment values if set. Interpolate environment values if set.
""" """
if 'environment' not in service_dict and 'env_file' not in service_dict:
return {}
env = {} env = {}
if 'env_file' in service_dict: for env_file in service_dict.get('env_file', []):
for env_file in get_env_files(working_dir, service_dict): env.update(env_vars_from_file(env_file))
env.update(env_vars_from_file(env_file))
env.update(parse_environment(service_dict.get('environment'))) env.update(parse_environment(service_dict.get('environment')))
return dict(resolve_env_var(k, v) for k, v in six.iteritems(env)) return dict(resolve_env_var(k, v) for k, v in six.iteritems(env))
@ -358,25 +368,57 @@ def validate_ulimits(ulimit_config):
"than 'hard' value".format(ulimit_config)) "than 'hard' value".format(ulimit_config))
# TODO: rename to normalize_service
def process_service(service_config): def process_service(service_config):
working_dir = service_config.working_dir working_dir = service_config.working_dir
service_dict = dict(service_config.config) service_dict = dict(service_config.config)
if 'env_file' in service_dict:
service_dict['env_file'] = [
expand_path(working_dir, path)
for path in to_list(service_dict['env_file'])
]
if 'volumes' in service_dict and service_dict.get('volume_driver') is None: if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict) service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)
if 'build' in service_dict: if 'build' in service_dict:
service_dict['build'] = expand_path(working_dir, service_dict['build']) service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])
if 'labels' in service_dict: if 'labels' in service_dict:
service_dict['labels'] = parse_labels(service_dict['labels']) service_dict['labels'] = parse_labels(service_dict['labels'])
if 'extra_hosts' in service_dict:
service_dict['extra_hosts'] = parse_extra_hosts(service_dict['extra_hosts'])
# TODO: move to a validate_service()
if 'ulimits' in service_dict: if 'ulimits' in service_dict:
validate_ulimits(service_dict['ulimits']) validate_ulimits(service_dict['ulimits'])
return service_dict return service_dict
def finalize_service(service_config):
service_dict = dict(service_config.config)
if 'environment' in service_dict or 'env_file' in service_dict:
service_dict['environment'] = resolve_environment(service_dict)
service_dict.pop('env_file', None)
if 'volumes_from' in service_dict:
service_dict['volumes_from'] = [
VolumeFromSpec.parse(vf) for vf in service_dict['volumes_from']]
if 'volumes' in service_dict:
service_dict['volumes'] = [
VolumeSpec.parse(v) for v in service_dict['volumes']]
if 'restart' in service_dict:
service_dict['restart'] = parse_restart_spec(service_dict['restart'])
return service_dict
def merge_service_dicts_from_files(base, override): def merge_service_dicts_from_files(base, override):
"""When merging services from multiple files we need to merge the `extends` """When merging services from multiple files we need to merge the `extends`
field. This is not handled by `merge_service_dicts()` which is used to field. This is not handled by `merge_service_dicts()` which is used to
@ -424,7 +466,7 @@ def merge_service_dicts(base, override):
if key in base or key in override: if key in base or key in override:
d[key] = base.get(key, []) + override.get(key, []) d[key] = base.get(key, []) + override.get(key, [])
list_or_string_keys = ['dns', 'dns_search'] list_or_string_keys = ['dns', 'dns_search', 'env_file']
for key in list_or_string_keys: for key in list_or_string_keys:
if key in base or key in override: if key in base or key in override:
@ -445,17 +487,6 @@ def merge_environment(base, override):
return env return env
def get_env_files(working_dir, options):
if 'env_file' not in options:
return {}
env_files = options.get('env_file', [])
if not isinstance(env_files, list):
env_files = [env_files]
return [expand_path(working_dir, path) for path in env_files]
def parse_environment(environment): def parse_environment(environment):
if not environment: if not environment:
return {} return {}
@ -524,11 +555,26 @@ def resolve_volume_path(working_dir, volume):
return container_path return container_path
def resolve_build_path(working_dir, build_path):
if is_url(build_path):
return build_path
return expand_path(working_dir, build_path)
def is_url(build_path):
return build_path.startswith(DOCKER_VALID_URL_PREFIXES)
def validate_paths(service_dict): def validate_paths(service_dict):
if 'build' in service_dict: if 'build' in service_dict:
build_path = service_dict['build'] build_path = service_dict['build']
if not os.path.exists(build_path) or not os.access(build_path, os.R_OK): if (
raise ConfigurationError("build path %s either does not exist or is not accessible." % build_path) not is_url(build_path) and
(not os.path.exists(build_path) or not os.access(build_path, os.R_OK))
):
raise ConfigurationError(
"build path %s either does not exist, is not accessible, "
"or is not a valid URL." % build_path)
def merge_path_mappings(base, override): def merge_path_mappings(base, override):
@ -613,17 +659,6 @@ def to_list(value):
return value return value
def get_service_name_from_net(net_config):
if not net_config:
return
if not net_config.startswith('container:'):
return
_, net_name = net_config.split(':', 1)
return net_name
def load_yaml(filename): def load_yaml(filename):
try: try:
with open(filename, 'r') as fh: with open(filename, 'r') as fh:

View File

@ -6,6 +6,10 @@ class ConfigurationError(Exception):
return self.msg return self.msg
class DependencyError(ConfigurationError):
pass
class CircularReference(ConfigurationError): class CircularReference(ConfigurationError):
def __init__(self, trail): def __init__(self, trail):
self.trail = trail self.trail = trail

View File

@ -37,26 +37,14 @@
"domainname": {"type": "string"}, "domainname": {"type": "string"},
"entrypoint": {"$ref": "#/definitions/string_or_list"}, "entrypoint": {"$ref": "#/definitions/string_or_list"},
"env_file": {"$ref": "#/definitions/string_or_list"}, "env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"environment": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "boolean", "null"],
"format": "environment"
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"expose": { "expose": {
"type": "array", "type": "array",
"items": {"type": ["string", "number"]}, "items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true "uniqueItems": true
}, },
@ -98,16 +86,8 @@
"ports": { "ports": {
"type": "array", "type": "array",
"items": { "items": {
"oneOf": [ "type": ["string", "number"],
{ "format": "ports"
"type": "string",
"format": "ports"
},
{
"type": "number",
"format": "ports"
}
]
}, },
"uniqueItems": true "uniqueItems": true
}, },
@ -165,10 +145,18 @@
"list_or_dict": { "list_or_dict": {
"oneOf": [ "oneOf": [
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}, {
{"type": "object"} "type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "boolean", "null"],
"format": "bool-value-in-mapping"
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
] ]
} }
} }
} }

View File

@ -0,0 +1,55 @@
from compose.config.errors import DependencyError
def get_service_name_from_net(net_config):
    """Return the target service name from a 'container:<name>' net setting.

    Returns None when the net config is empty or refers to anything other
    than another container.
    """
    prefix = 'container:'
    if not net_config or not net_config.startswith(prefix):
        return None
    return net_config[len(prefix):]
def sort_service_dicts(services):
    # Topological sort (Cormen/Tarjan algorithm).
    # Orders the service dicts so that every service appears before the
    # services that depend on it, where a dependency is expressed through
    # `links`, `volumes_from`, or `net: container:<name>`.
    # Raises DependencyError on a dependency cycle.
    unmarked = services[:]
    temporary_marked = set()
    sorted_services = []

    def get_service_names(links):
        # Links are 'service' or 'service:alias' strings; keep the service part.
        return [link.split(':')[0] for link in links]

    def get_service_names_from_volumes_from(volumes_from):
        # Each entry exposes a `.source` attribute naming the origin service.
        return [volume_from.source for volume_from in volumes_from]

    def get_service_dependents(service_dict, services):
        # All services that depend on `service_dict` by name.
        name = service_dict['name']
        return [
            service for service in services
            if (name in get_service_names(service.get('links', [])) or
                name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
                name == get_service_name_from_net(service.get('net')))
        ]

    def visit(n):
        if n['name'] in temporary_marked:
            # Reaching a node already on the current DFS path means a cycle;
            # pick the most specific error message we can.
            if n['name'] in get_service_names(n.get('links', [])):
                raise DependencyError('A service can not link to itself: %s' % n['name'])
            if n['name'] in n.get('volumes_from', []):
                # NOTE(review): volumes_from entries appear to be spec tuples
                # (see get_service_names_from_volumes_from), so comparing the
                # plain name against them may never match — confirm.
                raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
            else:
                raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
        if n in unmarked:
            temporary_marked.add(n['name'])
            for m in get_service_dependents(n, services):
                visit(m)
            temporary_marked.remove(n['name'])
            unmarked.remove(n)
            # Dependents were visited (and inserted) first, so prepending `n`
            # keeps every service ahead of the services that depend on it.
            sorted_services.insert(0, n)

    while unmarked:
        visit(unmarked[-1])
    return sorted_services

120
compose/config/types.py Normal file
View File

@ -0,0 +1,120 @@
"""
Types for objects parsed from the configuration.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from collections import namedtuple
from compose.config.errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode')):

    @classmethod
    def parse(cls, volume_from_config):
        """Parse a 'service[:mode]' string into a VolumeFromSpec.

        The access mode defaults to 'rw' when omitted. Raises
        ConfigurationError when more than one ':' separator is present.
        """
        source, sep, mode = volume_from_config.partition(':')
        if ':' in mode:
            raise ConfigurationError(
                "volume_from {} has incorrect format, should be "
                "service[:mode]".format(volume_from_config))
        return cls(source, mode if sep else 'rw')
def parse_restart_spec(restart_config):
    """Convert a 'mode[:max_retry]' restart setting into the docker API form.

    Returns None for an empty setting, otherwise a dict with 'Name' and
    'MaximumRetryCount' keys. Raises ConfigurationError when more than one
    ':' separator is present.
    """
    if not restart_config:
        return None
    name, sep, retries = restart_config.partition(':')
    if ':' in retries:
        raise ConfigurationError(
            "Restart %s has incorrect format, should be "
            "mode[:max_retry]" % restart_config)
    return {
        'Name': name,
        # No separator means no retry count was given; default to 0.
        'MaximumRetryCount': int(retries) if sep else 0,
    }
def parse_extra_hosts(extra_hosts_config):
    """Normalize an extra_hosts value into a plain {hostname: ip} dict.

    Accepts either a mapping (copied as-is) or a list of 'host:ip' strings;
    an empty value yields an empty dict.
    """
    if not extra_hosts_config:
        return {}
    if isinstance(extra_hosts_config, dict):
        return dict(extra_hosts_config)
    if isinstance(extra_hosts_config, list):
        # TODO: validate string contains ':' ?
        return {
            host.strip(): ip.strip()
            for host, ip in (line.split(':') for line in extra_hosts_config)
        }
def normalize_paths_for_engine(external_path, internal_path):
    r"""Windows paths, c:\my\path\shiny, need to be changed to be compatible with
    the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
    """
    if not IS_WINDOWS_PLATFORM:
        # Nothing to do on POSIX hosts; paths are already engine-compatible.
        return external_path, internal_path

    internal_path = internal_path.replace('\\', '/')
    if not external_path:
        # Anonymous volume: only a container path was given.
        return external_path, internal_path

    drive, tail = os.path.splitdrive(external_path)
    if drive:
        # 'C:\x' -> '/c\x'; backslashes are converted below.
        external_path = '/' + drive.lower().rstrip(':') + tail
    return external_path.replace('\\', '/'), internal_path
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):

    @classmethod
    def parse(cls, volume_config):
        """Parse a volume_config path and split it into external:internal[:mode]
        parts to be returned as a valid VolumeSpec.

        Raises ConfigurationError when more than three ':'-separated parts
        are present.
        """
        if IS_WINDOWS_PLATFORM:
            # relative paths in windows expand to include the drive, eg C:\
            # so we join the first 2 parts back together to count as one
            drive, tail = os.path.splitdrive(volume_config)
            parts = tail.split(":")
            if drive:
                parts[0] = drive + parts[0]
        else:
            parts = volume_config.split(':')
        if len(parts) > 3:
            raise ConfigurationError(
                "Volume %s has incorrect format, should be "
                "external:internal[:mode]" % volume_config)

        if len(parts) == 1:
            # Only a container path was given: no host path (external is None).
            external, internal = normalize_paths_for_engine(
                None,
                os.path.normpath(parts[0]))
        else:
            external, internal = normalize_paths_for_engine(
                os.path.normpath(parts[0]),
                os.path.normpath(parts[1]))

        mode = 'rw'  # default access mode when none is specified
        if len(parts) == 3:
            mode = parts[2]
        return cls(external, internal, mode)

View File

@ -1,6 +1,7 @@
import json import json
import logging import logging
import os import os
import re
import sys import sys
import six import six
@ -34,22 +35,29 @@ DOCKER_CONFIG_HINTS = {
VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]' VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
VALID_EXPOSE_FORMAT = r'^\d+(\/[a-zA-Z]+)?$'
@FormatChecker.cls_checks( @FormatChecker.cls_checks(format="ports", raises=ValidationError)
format="ports",
raises=ValidationError(
"Invalid port formatting, it should be "
"'[[remote_ip:]remote_port:]port[/protocol]'"))
def format_ports(instance): def format_ports(instance):
try: try:
split_port(instance) split_port(instance)
except ValueError: except ValueError as e:
return False raise ValidationError(six.text_type(e))
return True return True
@FormatChecker.cls_checks(format="environment") @FormatChecker.cls_checks(format="expose", raises=ValidationError)
def format_expose(instance):
if isinstance(instance, six.string_types):
if not re.match(VALID_EXPOSE_FORMAT, instance):
raise ValidationError(
"should be of the format 'PORT[/PROTOCOL]'")
return True
@FormatChecker.cls_checks(format="bool-value-in-mapping")
def format_boolean_in_environment(instance): def format_boolean_in_environment(instance):
""" """
Check if there is a boolean in the environment and display a warning. Check if there is a boolean in the environment and display a warning.
@ -184,6 +192,10 @@ def handle_generic_service_error(error, service_name):
config_key, config_key,
required_keys) required_keys)
elif error.cause:
error_msg = six.text_type(error.cause)
msg_format = "Service '{}' configuration key {} is invalid: {}"
elif error.path: elif error.path:
msg_format = "Service '{}' configuration key {} value {}" msg_format = "Service '{}' configuration key {} value {}"
@ -273,7 +285,7 @@ def validate_against_fields_schema(config, filename):
_validate_against_schema( _validate_against_schema(
config, config,
"fields_schema.json", "fields_schema.json",
format_checker=["ports", "environment"], format_checker=["ports", "expose", "bool-value-in-mapping"],
filename=filename) filename=filename)

View File

@ -8,7 +8,7 @@ from docker.errors import APIError
from docker.errors import NotFound from docker.errors import NotFound
from .config import ConfigurationError from .config import ConfigurationError
from .config import get_service_name_from_net from .config.sort_services import get_service_name_from_net
from .const import DEFAULT_TIMEOUT from .const import DEFAULT_TIMEOUT
from .const import LABEL_ONE_OFF from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT from .const import LABEL_PROJECT
@ -18,62 +18,14 @@ from .legacy import check_for_legacy_containers
from .service import ContainerNet from .service import ContainerNet
from .service import ConvergenceStrategy from .service import ConvergenceStrategy
from .service import Net from .service import Net
from .service import parse_volume_from_spec
from .service import Service from .service import Service
from .service import ServiceNet from .service import ServiceNet
from .service import VolumeFromSpec
from .utils import parallel_execute from .utils import parallel_execute
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_names_from_volumes_from(volumes_from):
return [
parse_volume_from_spec(volume_from).source
for volume_from in volumes_from
]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
class Project(object): class Project(object):
""" """
A collection of services. A collection of services.
@ -101,7 +53,7 @@ class Project(object):
if use_networking: if use_networking:
remove_links(service_dicts) remove_links(service_dicts)
for service_dict in sort_service_dicts(service_dicts): for service_dict in service_dicts:
links = project.get_links(service_dict) links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict) volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict) net = project.get_net(service_dict)
@ -192,16 +144,15 @@ class Project(object):
def get_volumes_from(self, service_dict): def get_volumes_from(self, service_dict):
volumes_from = [] volumes_from = []
if 'volumes_from' in service_dict: if 'volumes_from' in service_dict:
for volume_from_config in service_dict.get('volumes_from', []): for volume_from_spec in service_dict.get('volumes_from', []):
volume_from_spec = parse_volume_from_spec(volume_from_config)
# Get service # Get service
try: try:
service_name = self.get_service(volume_from_spec.source) service = self.get_service(volume_from_spec.source)
volume_from_spec = VolumeFromSpec(service_name, volume_from_spec.mode) volume_from_spec = volume_from_spec._replace(source=service)
except NoSuchService: except NoSuchService:
try: try:
container_name = Container.from_id(self.client, volume_from_spec.source) container = Container.from_id(self.client, volume_from_spec.source)
volume_from_spec = VolumeFromSpec(container_name, volume_from_spec.mode) volume_from_spec = volume_from_spec._replace(source=container)
except APIError: except APIError:
raise ConfigurationError( raise ConfigurationError(
'Service "%s" mounts volumes from "%s", which is ' 'Service "%s" mounts volumes from "%s", which is '
@ -430,7 +381,3 @@ class NoSuchService(Exception):
def __str__(self): def __str__(self):
return self.msg return self.msg
class DependencyError(ConfigurationError):
pass

View File

@ -2,7 +2,6 @@ from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
import logging import logging
import os
import re import re
import sys import sys
from collections import namedtuple from collections import namedtuple
@ -18,9 +17,8 @@ from docker.utils.ports import split_port
from . import __version__ from . import __version__
from .config import DOCKER_CONFIG_KEYS from .config import DOCKER_CONFIG_KEYS
from .config import merge_environment from .config import merge_environment
from .config.validation import VALID_NAME_CHARS from .config.types import VolumeSpec
from .const import DEFAULT_TIMEOUT from .const import DEFAULT_TIMEOUT
from .const import IS_WINDOWS_PLATFORM
from .const import LABEL_CONFIG_HASH from .const import LABEL_CONFIG_HASH
from .const import LABEL_CONTAINER_NUMBER from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF from .const import LABEL_ONE_OFF
@ -68,10 +66,6 @@ class BuildError(Exception):
self.reason = reason self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception): class NeedsBuildError(Exception):
def __init__(self, service): def __init__(self, service):
self.service = service self.service = service
@ -81,12 +75,6 @@ class NoSuchImageError(Exception):
pass pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
VolumeFromSpec = namedtuple('VolumeFromSpec', 'source mode')
ServiceName = namedtuple('ServiceName', 'project service number') ServiceName = namedtuple('ServiceName', 'project service number')
@ -119,9 +107,6 @@ class Service(object):
net=None, net=None,
**options **options
): ):
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
self.name = name self.name = name
self.client = client self.client = client
self.project = project self.project = project
@ -185,7 +170,7 @@ class Service(object):
c.kill(**options) c.kill(**options)
def restart(self, **options): def restart(self, **options):
for c in self.containers(): for c in self.containers(stopped=True):
log.info("Restarting %s" % c.name) log.info("Restarting %s" % c.name)
c.restart(**options) c.restart(**options)
@ -526,7 +511,7 @@ class Service(object):
# TODO: Implement issue #652 here # TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off) return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943 # TODO: this would benefit from github.com/docker/docker/pull/14699
# to remove the need to inspect every container # to remove the need to inspect every container
def _next_container_number(self, one_off=False): def _next_container_number(self, one_off=False):
containers = filter(None, [ containers = filter(None, [
@ -619,8 +604,7 @@ class Service(object):
if 'volumes' in container_options: if 'volumes' in container_options:
container_options['volumes'] = dict( container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {}) (v.internal, {}) for v in container_options['volumes'])
for v in container_options['volumes'])
container_options['environment'] = merge_environment( container_options['environment'] = merge_environment(
self.options.get('environment'), self.options.get('environment'),
@ -649,58 +633,34 @@ class Service(object):
def _get_container_host_config(self, override_options, one_off=False): def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options) options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig( log_config = LogConfig(
type=options.get('log_driver', ""), type=options.get('log_driver', ""),
config=options.get('log_opt', None) config=options.get('log_opt', None)
) )
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
cgroup_parent = options.get('cgroup_parent', None)
ulimits = build_ulimits(options.get('ulimits', None))
return self.client.create_host_config( return self.client.create_host_config(
links=self._get_links(link_to_self=one_off), links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings, port_bindings=build_port_bindings(options.get('ports') or []),
binds=options.get('binds'), binds=options.get('binds'),
volumes_from=self._get_volumes_from(), volumes_from=self._get_volumes_from(),
privileged=privileged, privileged=options.get('privileged', False),
network_mode=self.net.mode, network_mode=self.net.mode,
devices=devices, devices=options.get('devices'),
dns=dns, dns=options.get('dns'),
dns_search=dns_search, dns_search=options.get('dns_search'),
restart_policy=restart, restart_policy=options.get('restart'),
cap_add=cap_add, cap_add=options.get('cap_add'),
cap_drop=cap_drop, cap_drop=options.get('cap_drop'),
mem_limit=options.get('mem_limit'), mem_limit=options.get('mem_limit'),
memswap_limit=options.get('memswap_limit'), memswap_limit=options.get('memswap_limit'),
ulimits=ulimits, ulimits=build_ulimits(options.get('ulimits')),
log_config=log_config, log_config=log_config,
extra_hosts=extra_hosts, extra_hosts=options.get('extra_hosts'),
read_only=read_only, read_only=options.get('read_only'),
pid_mode=pid, pid_mode=options.get('pid'),
security_opt=security_opt, security_opt=options.get('security_opt'),
ipc_mode=options.get('ipc'), ipc_mode=options.get('ipc'),
cgroup_parent=cgroup_parent cgroup_parent=options.get('cgroup_parent'),
) )
def build(self, no_cache=False, pull=False, force_rm=False): def build(self, no_cache=False, pull=False, force_rm=False):
@ -767,10 +727,28 @@ class Service(object):
return self.options.get('container_name') return self.options.get('container_name')
def specifies_host_port(self): def specifies_host_port(self):
for port in self.options.get('ports', []): def has_host_port(binding):
if ':' in str(port): _, external_bindings = split_port(binding)
# there are no external bindings
if external_bindings is None:
return False
# we only need to check the first binding from the range
external_binding = external_bindings[0]
# non-tuple binding means there is a host port specified
if not isinstance(external_binding, tuple):
return True return True
return False
# extract actual host port from tuple of (host_ip, host_port)
_, host_port = external_binding
if host_port is not None:
return True
return False
return any(has_host_port(binding) for binding in self.options.get('ports', []))
def pull(self, ignore_pull_failures=False): def pull(self, ignore_pull_failures=False):
if 'image' not in self.options: if 'image' not in self.options:
@ -891,11 +869,10 @@ def parse_repository_tag(repo_path):
# Volumes # Volumes
def merge_volume_bindings(volumes_option, previous_container): def merge_volume_bindings(volumes, previous_container):
"""Return a list of volume bindings for a container. Container data volumes """Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container. are replaced by those from the previous container.
""" """
volumes = [parse_volume_spec(volume) for volume in volumes_option or []]
volume_bindings = dict( volume_bindings = dict(
build_volume_binding(volume) build_volume_binding(volume)
for volume in volumes for volume in volumes
@ -917,7 +894,7 @@ def get_container_data_volumes(container, volumes_option):
volumes = [] volumes = []
container_volumes = container.get('Volumes') or {} container_volumes = container.get('Volumes') or {}
image_volumes = [ image_volumes = [
parse_volume_spec(volume) VolumeSpec.parse(volume)
for volume in for volume in
container.image_config['ContainerConfig'].get('Volumes') or {} container.image_config['ContainerConfig'].get('Volumes') or {}
] ]
@ -945,7 +922,10 @@ def warn_on_masked_volume(volumes_option, container_volumes, service):
for volume in container_volumes) for volume in container_volumes)
for volume in volumes_option: for volume in volumes_option:
if container_volumes.get(volume.internal) != volume.external: if (
volume.internal in container_volumes and
container_volumes.get(volume.internal) != volume.external
):
log.warn(( log.warn((
"Service \"{service}\" is using volume \"{volume}\" from the " "Service \"{service}\" is using volume \"{volume}\" from the "
"previous container. Host mapping \"{host_path}\" has no effect. " "previous container. Host mapping \"{host_path}\" has no effect. "
@ -961,56 +941,6 @@ def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec) return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
def normalize_paths_for_engine(external_path, internal_path):
    """Rewrite Windows volume paths into the Engine's expected form.

    A host path such as ``c:/my/path`` (written with backslashes) becomes
    ``/c/my/path``.  On non-Windows platforms both paths pass through
    untouched.
    """
    if not IS_WINDOWS_PLATFORM:
        return external_path, internal_path

    internal = internal_path.replace('\\', '/')
    if not external_path:
        return external_path, internal

    # Turn a leading drive letter ("C:") into a lowercase root segment ("/c").
    drive, remainder = os.path.splitdrive(external_path)
    if drive:
        external_path = '/' + drive.lower().rstrip(':') + remainder
    return external_path.replace('\\', '/'), internal
def parse_volume_spec(volume_config):
    """
    Parse a volume_config path and split it into external:internal[:mode]
    parts to be returned as a valid VolumeSpec.
    """
    if IS_WINDOWS_PLATFORM:
        # relative paths in windows expand to include the drive, eg C:\
        # so we join the first 2 parts back together to count as one
        drive, tail = os.path.splitdrive(volume_config)
        parts = tail.split(":")
        if drive:
            parts[0] = drive + parts[0]
    else:
        parts = volume_config.split(':')

    if len(parts) > 3:
        raise ConfigError("Volume %s has incorrect format, should be "
                          "external:internal[:mode]" % volume_config)

    if len(parts) == 1:
        # A single path is a container-internal (anonymous) volume: no host
        # side, so the external path is None.
        external, internal = normalize_paths_for_engine(None, os.path.normpath(parts[0]))
    else:
        external, internal = normalize_paths_for_engine(os.path.normpath(parts[0]), os.path.normpath(parts[1]))

    # Access mode defaults to read-write unless a third part was given.
    mode = 'rw'
    if len(parts) == 3:
        mode = parts[2]

    return VolumeSpec(external, internal, mode)
def build_volume_from(volume_from_spec): def build_volume_from(volume_from_spec):
""" """
volume_from can be either a service or a container. We want to return the volume_from can be either a service or a container. We want to return the
@ -1027,21 +957,6 @@ def build_volume_from(volume_from_spec):
return ["{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)] return ["{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)]
def parse_volume_from_spec(volume_from_config):
    """Parse a ``volumes_from`` entry ("source[:mode]") into a VolumeFromSpec.

    The source names a service or container; the optional mode defaults to
    'rw'.  Raises ConfigError when more than one ':' separator is present.
    """
    parts = volume_from_config.split(':')
    if len(parts) > 2:
        # NOTE: the previous message described the *volume* spec format
        # ("external:internal[:mode]"), which does not apply to volumes_from.
        raise ConfigError("volume_from %s has incorrect format, should be "
                          "source[:mode]" % volume_from_config)

    if len(parts) == 1:
        source = parts[0]
        mode = 'rw'
    else:
        source, mode = parts

    return VolumeFromSpec(source, mode)
# Labels # Labels
@ -1058,24 +973,6 @@ def build_container_labels(label_options, service_labels, number, config_hash):
return labels return labels
# Restart policy
def parse_restart_spec(restart_config):
    """Parse a restart policy string ("mode[:max_retry]") into the dict form
    the Docker API expects, or return None when no policy is configured.

    Raises ConfigError when the string contains more than one ':' separator.
    """
    if not restart_config:
        return None

    pieces = restart_config.split(':')
    if len(pieces) > 2:
        raise ConfigError("Restart %s has incorrect format, should be "
                          "mode[:max_retry]" % restart_config)

    mode = pieces[0]
    # Retry count defaults to 0 (unlimited / not applicable) when omitted.
    retry_count = pieces[1] if len(pieces) == 2 else 0
    return {'Name': mode, 'MaximumRetryCount': int(retry_count)}
# Ulimits # Ulimits
@ -1092,31 +989,3 @@ def build_ulimits(ulimit_config):
ulimits.append(ulimit_dict) ulimits.append(ulimit_dict)
return ulimits return ulimits
# Extra hosts
def build_extra_hosts(extra_hosts_config):
    """Normalize the ``extra_hosts`` option into a host->ip dict.

    Accepts either a list of "host:ip" strings or a mapping of host to ip.
    Returns {} for an empty/absent config.  Raises ConfigError for any other
    shape.
    """
    if not extra_hosts_config:
        return {}

    if isinstance(extra_hosts_config, list):
        extra_hosts_dict = {}
        for extra_hosts_line in extra_hosts_config:
            if not isinstance(extra_hosts_line, six.string_types):
                # NOTE: message fixed to reference the config key
                # ("extra_hosts") rather than the internal variable name, and
                # to drop the stray trailing comma.
                raise ConfigError(
                    "extra_hosts \"%s\" must be either a list of strings "
                    "or a string->string mapping" % extra_hosts_config
                )
            host, ip = extra_hosts_line.split(':')
            extra_hosts_dict[host.strip()] = ip.strip()
        extra_hosts_config = extra_hosts_dict

    if isinstance(extra_hosts_config, dict):
        return extra_hosts_config

    raise ConfigError(
        "extra_hosts \"%s\" must be either a list of strings "
        "or a string->string mapping" % extra_hosts_config
    )

View File

@ -102,7 +102,7 @@ def stream_as_text(stream):
def line_splitter(buffer, separator=u'\n'): def line_splitter(buffer, separator=u'\n'):
index = buffer.find(six.text_type(separator)) index = buffer.find(six.text_type(separator))
if index == -1: if index == -1:
return None, None return None
return buffer[:index + 1], buffer[index + 1:] return buffer[:index + 1], buffer[index + 1:]
@ -120,11 +120,11 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
for data in stream_as_text(stream): for data in stream_as_text(stream):
buffered += data buffered += data
while True: while True:
item, rest = splitter(buffered) buffer_split = splitter(buffered)
if not item: if buffer_split is None:
break break
buffered = rest item, buffered = buffer_split
yield item yield item
if buffered: if buffered:
@ -140,7 +140,7 @@ def json_splitter(buffer):
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():] rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest return obj, rest
except ValueError: except ValueError:
return None, None return None
def json_stream(stream): def json_stream(stream):
@ -148,7 +148,7 @@ def json_stream(stream):
This handles streams which are inconsistently buffered (some entries may This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not). be newline delimited, and others are not).
""" """
return split_buffer(stream_as_text(stream), json_splitter, json_decoder.decode) return split_buffer(stream, json_splitter, json_decoder.decode)
def write_out_msg(stream, lines, msg_index, msg, status="done"): def write_out_msg(stream, lines, msg_index, msg, status="done"):

View File

@ -9,18 +9,32 @@ a = Analysis(['bin/docker-compose'],
runtime_hooks=None, runtime_hooks=None,
cipher=block_cipher) cipher=block_cipher)
pyz = PYZ(a.pure, pyz = PYZ(a.pure, cipher=block_cipher)
cipher=block_cipher)
exe = EXE(pyz, exe = EXE(pyz,
a.scripts, a.scripts,
a.binaries, a.binaries,
a.zipfiles, a.zipfiles,
a.datas, a.datas,
[('compose/config/fields_schema.json', 'compose/config/fields_schema.json', 'DATA')], [
[('compose/config/service_schema.json', 'compose/config/service_schema.json', 'DATA')], (
'compose/config/fields_schema.json',
'compose/config/fields_schema.json',
'DATA'
),
(
'compose/config/service_schema.json',
'compose/config/service_schema.json',
'DATA'
),
(
'compose/GITSHA',
'compose/GITSHA',
'DATA'
)
],
name='docker-compose', name='docker-compose',
debug=False, debug=False,
strip=None, strip=None,
upx=True, upx=True,
console=True ) console=True)

View File

@ -31,15 +31,18 @@ definition.
### build ### build
Path to a directory containing a Dockerfile. When the value supplied is a Either a path to a directory containing a Dockerfile, or a url to a git repository.
relative path, it is interpreted as relative to the location of the yml file
itself. This directory is also the build context that is sent to the Docker daemon. When the value supplied is a relative path, it is interpreted as relative to the
location of the Compose file. This directory is also the build context that is
sent to the Docker daemon.
Compose will build and tag it with a generated name, and use that image thereafter. Compose will build and tag it with a generated name, and use that image thereafter.
build: /path/to/build/dir build: /path/to/build/dir
Using `build` together with `image` is not allowed. Attempting to do so results in an error. Using `build` together with `image` is not allowed. Attempting to do so results in
an error.
### cap_add, cap_drop ### cap_add, cap_drop
@ -105,8 +108,10 @@ Custom DNS search domains. Can be a single value or a list.
Alternate Dockerfile. Alternate Dockerfile.
Compose will use an alternate file to build with. Compose will use an alternate file to build with. A build path must also be
specified using the `build` key.
build: /path/to/build/dir
dockerfile: Dockerfile-alternate dockerfile: Dockerfile-alternate
Using `dockerfile` together with `image` is not allowed. Attempting to do so results in an error. Using `dockerfile` together with `image` is not allowed. Attempting to do so results in an error.

139
docs/faq.md Normal file
View File

@ -0,0 +1,139 @@
<!--[metadata]>
+++
title = "Frequently Asked Questions"
description = "Docker Compose FAQ"
keywords = "documentation, docs, docker, compose, faq"
[menu.main]
parent="smn_workw_compose"
weight=9
+++
<![end-metadata]-->
# Frequently asked questions
If you don't see your question here, feel free to drop by `#docker-compose` on
freenode IRC and ask the community.
## Why do my services take 10 seconds to stop?
Compose stop attempts to stop a container by sending a `SIGTERM`. It then waits
for a [default timeout of 10 seconds](./reference/stop.md). After the timeout,
a `SIGKILL` is sent to the container to forcefully kill it. If you
are waiting for this timeout, it means that your containers aren't shutting down
when they receive the `SIGTERM` signal.
There has already been a lot written about this problem of
[processes handling signals](https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86)
in containers.
To fix this problem, try the following:
* Make sure you're using the JSON form of `CMD` and `ENTRYPOINT`
in your Dockerfile.
For example use `["program", "arg1", "arg2"]` not `"program arg1 arg2"`.
Using the string form causes Docker to run your process using `bash` which
doesn't handle signals properly. Compose always uses the JSON form, so don't
worry if you override the command or entrypoint in your Compose file.
* If you are able, modify the application that you're running to
add an explicit signal handler for `SIGTERM`.
* If you can't modify the application, wrap the application in a lightweight init
system (like [s6](http://skarnet.org/software/s6/)) or a signal proxy (like
[dumb-init](https://github.com/Yelp/dumb-init) or
[tini](https://github.com/krallin/tini)). Either of these wrappers take care of
handling `SIGTERM` properly.
## How do I run multiple copies of a Compose file on the same host?
Compose uses the project name to create unique identifiers for all of a
project's containers and other resources. To run multiple copies of a project,
set a custom project name using the [`-p` command line
option](./reference/docker-compose.md) or the [`COMPOSE_PROJECT_NAME`
environment variable](./reference/overview.md#compose-project-name).
## What's the difference between `up`, `run`, and `start`?
Typically, you want `docker-compose up`. Use `up` to start or restart all the
services defined in a `docker-compose.yml`. In the default "attached"
mode, you'll see all the logs from all the containers. In "detached" mode (`-d`),
Compose exits after starting the containers, but the containers continue to run
in the background.
The `docker-compose run` command is for running "one-off" or "adhoc" tasks. It
requires the service name you want to run and only starts containers for services
that the running service depends on. Use `run` to run tests or perform
an administrative task such as removing or adding data to a data volume
container. The `run` command acts like `docker run -ti` in that it opens an
interactive terminal to the container and returns an exit status matching the
exit status of the process in the container.
The `docker-compose start` command is useful only to restart containers
that were previously created, but were stopped. It never creates new
containers.
## Can I use json instead of yaml for my Compose file?
Yes. [Yaml is a superset of json](http://stackoverflow.com/a/1729545/444646) so
any JSON file should be valid Yaml. To use a JSON file with Compose,
specify the filename to use, for example:
```bash
docker-compose -f docker-compose.json up
```
## How do I get Compose to wait for my database to be ready before starting my application?
Unfortunately, Compose won't do that for you, and for a good reason.
The problem of waiting for a database to be ready is really just a subset of a
much larger problem of distributed systems. In production, your database could
become unavailable or move hosts at any time. The application needs to be
resilient to these types of failures.
To handle this, the application would attempt to re-establish a connection to
the database after a failure. If the application retries the connection,
it should eventually be able to connect to the database.
To wait for the application to be in a good state, you can implement a
healthcheck. A healthcheck makes a request to the application and checks
the response for a success status code. If it is not successful it waits
for a short period of time, and tries again. After some timeout value, the check
stops trying and reports a failure.
If you need to run tests against your application, you can start by running a
healthcheck. Once the healthcheck gets a successful response, you can start
running your tests.
## Should I include my code with `COPY`/`ADD` or a volume?
You can add your code to the image using `COPY` or `ADD` directive in a
`Dockerfile`. This is useful if you need to relocate your code along with the
Docker image, for example when you're sending code to another environment
(production, CI, etc).
You should use a `volume` if you want to make changes to your code and see them
reflected immediately, for example when you're developing code and your server
supports hot code reloading or live-reload.
There may be cases where you'll want to use both. You can have the image
include the code using a `COPY`, and use a `volume` in your Compose file to
include the code from the host during development. The volume overrides
the directory contents of the image.
## Where can I find example compose files?
There are [many examples of Compose files on
github](https://github.com/search?q=in%3Apath+docker-compose.yml+extension%3Ayml&type=Code).
## Compose documentation
- [Installing Compose](install.md)
- [Get started with Django](django.md)
- [Get started with Rails](rails.md)
- [Get started with WordPress](wordpress.md)
- [Command line reference](./reference/index.md)
- [Compose file reference](compose-file.md)

View File

@ -59,6 +59,7 @@ Compose has commands for managing the whole lifecycle of your application:
- [Get started with Django](django.md) - [Get started with Django](django.md)
- [Get started with Rails](rails.md) - [Get started with Rails](rails.md)
- [Get started with WordPress](wordpress.md) - [Get started with WordPress](wordpress.md)
- [Frequently asked questions](faq.md)
- [Command line reference](./reference/index.md) - [Command line reference](./reference/index.md)
- [Compose file reference](compose-file.md) - [Compose file reference](compose-file.md)

View File

@ -39,7 +39,7 @@ which the release page specifies, in your terminal.
The following is an example command illustrating the format: The following is an example command illustrating the format:
curl -L https://github.com/docker/compose/releases/download/1.5.1/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
If you have problems installing with `curl`, see If you have problems installing with `curl`, see
[Alternative Install Options](#alternative-install-options). [Alternative Install Options](#alternative-install-options).
@ -54,7 +54,7 @@ which the release page specifies, in your terminal.
7. Test the installation. 7. Test the installation.
$ docker-compose --version $ docker-compose --version
docker-compose version: 1.5.1 docker-compose version: 1.5.2
## Alternative install options ## Alternative install options
@ -70,13 +70,14 @@ to get started.
$ pip install docker-compose $ pip install docker-compose
> **Note:** pip version 6.0 or greater is required
### Install as a container ### Install as a container
Compose can also be run inside a container, from a small bash script wrapper. Compose can also be run inside a container, from a small bash script wrapper.
To install compose as a container run: To install compose as a container run:
$ curl -L https://github.com/docker/compose/releases/download/1.5.1/run.sh > /usr/local/bin/docker-compose $ curl -L https://github.com/docker/compose/releases/download/1.5.2/run.sh > /usr/local/bin/docker-compose
$ chmod +x /usr/local/bin/docker-compose $ chmod +x /usr/local/bin/docker-compose
## Master builds ## Master builds

View File

@ -87,15 +87,18 @@ relative to the current working directory.
The `-f` flag is optional. If you don't provide this flag on the command line, The `-f` flag is optional. If you don't provide this flag on the command line,
Compose traverses the working directory and its subdirectories looking for a Compose traverses the working directory and its subdirectories looking for a
`docker-compose.yml` and a `docker-compose.override.yml` file. You must supply `docker-compose.yml` and a `docker-compose.override.yml` file. You must
at least the `docker-compose.yml` file. If both files are present, Compose supply at least the `docker-compose.yml` file. If both files are present,
combines the two files into a single configuration. The configuration in the Compose combines the two files into a single configuration. The configuration
`docker-compose.override.yml` file is applied over and in addition to the values in the `docker-compose.override.yml` file is applied over and in addition to
in the `docker-compose.yml` file. the values in the `docker-compose.yml` file.
See also the `COMPOSE_FILE` [environment variable](overview.md#compose-file).
Each configuration has a project name. If you supply a `-p` flag, you can Each configuration has a project name. If you supply a `-p` flag, you can
specify a project name. If you don't specify the flag, Compose uses the current specify a project name. If you don't specify the flag, Compose uses the current
directory name. directory name. See also the `COMPOSE_PROJECT_NAME` [environment variable](
overview.md#compose-project-name)
## Where to go next ## Where to go next

View File

@ -32,11 +32,16 @@ Docker command-line client. If you're using `docker-machine`, then the `eval "$(
Sets the project name. This value is prepended along with the service name to the container on start up. For example, if your project name is `myapp` and it includes two services `db` and `web` then compose starts containers named `myapp_db_1` and `myapp_web_1` respectively. Sets the project name. This value is prepended along with the service name to the container on start up. For example, if your project name is `myapp` and it includes two services `db` and `web` then compose starts containers named `myapp_db_1` and `myapp_web_1` respectively.
Setting this is optional. If you do not set this, the `COMPOSE_PROJECT_NAME` defaults to the `basename` of the current working directory. Setting this is optional. If you do not set this, the `COMPOSE_PROJECT_NAME`
defaults to the `basename` of the project directory. See also the `-p`
[command-line option](docker-compose.md).
### COMPOSE\_FILE ### COMPOSE\_FILE
Specify the file containing the compose configuration. If not provided, Compose looks for a file named `docker-compose.yml` in the current directory and then each parent directory in succession until a file by that name is found. Specify the file containing the compose configuration. If not provided,
Compose looks for a file named `docker-compose.yml` in the current directory
and then each parent directory in succession until a file by that name is
found. See also the `-f` [command-line option](docker-compose.md).
### COMPOSE\_API\_VERSION ### COMPOSE\_API\_VERSION

View File

@ -6,5 +6,5 @@ enum34==1.0.4
jsonschema==2.5.1 jsonschema==2.5.1
requests==2.7.0 requests==2.7.0
six==1.7.3 six==1.7.3
texttable==0.8.2 texttable==0.8.4
websocket-client==0.32.0 websocket-client==0.32.0

View File

@ -10,6 +10,7 @@ fi
TAG=$1 TAG=$1
VERSION="$(python setup.py --version)" VERSION="$(python setup.py --version)"
./script/write-git-sha
python setup.py sdist python setup.py sdist
cp dist/docker-compose-$VERSION.tar.gz dist/docker-compose-release.tar.gz cp dist/docker-compose-$VERSION.tar.gz dist/docker-compose-release.tar.gz
docker build -t docker/compose:$TAG -f Dockerfile.run . docker build -t docker/compose:$TAG -f Dockerfile.run .

View File

@ -9,4 +9,5 @@ docker build -t "$TAG" . | tail -n 200
docker run \ docker run \
--rm --entrypoint="script/build-linux-inner" \ --rm --entrypoint="script/build-linux-inner" \
-v $(pwd)/dist:/code/dist \ -v $(pwd)/dist:/code/dist \
-v $(pwd)/.git:/code/.git \
"$TAG" "$TAG"

View File

@ -2,13 +2,14 @@
set -ex set -ex
TARGET=dist/docker-compose-Linux-x86_64 TARGET=dist/docker-compose-$(uname -s)-$(uname -m)
VENV=/code/.tox/py27 VENV=/code/.tox/py27
mkdir -p `pwd`/dist mkdir -p `pwd`/dist
chmod 777 `pwd`/dist chmod 777 `pwd`/dist
$VENV/bin/pip install -q -r requirements-build.txt $VENV/bin/pip install -q -r requirements-build.txt
./script/write-git-sha
su -c "$VENV/bin/pyinstaller docker-compose.spec" user su -c "$VENV/bin/pyinstaller docker-compose.spec" user
mv dist/docker-compose $TARGET mv dist/docker-compose $TARGET
$TARGET version $TARGET version

View File

@ -9,6 +9,7 @@ virtualenv -p /usr/local/bin/python venv
venv/bin/pip install -r requirements.txt venv/bin/pip install -r requirements.txt
venv/bin/pip install -r requirements-build.txt venv/bin/pip install -r requirements-build.txt
venv/bin/pip install --no-deps . venv/bin/pip install --no-deps .
./script/write-git-sha
venv/bin/pyinstaller docker-compose.spec venv/bin/pyinstaller docker-compose.spec
mv dist/docker-compose dist/docker-compose-Darwin-x86_64 mv dist/docker-compose dist/docker-compose-Darwin-x86_64
dist/docker-compose-Darwin-x86_64 version dist/docker-compose-Darwin-x86_64 version

View File

@ -47,6 +47,8 @@ virtualenv .\venv
.\venv\Scripts\pip install --no-deps . .\venv\Scripts\pip install --no-deps .
.\venv\Scripts\pip install --allow-external pyinstaller -r requirements-build.txt .\venv\Scripts\pip install --allow-external pyinstaller -r requirements-build.txt
git rev-parse --short HEAD | out-file -encoding ASCII compose\GITSHA
# Build binary # Build binary
# pyinstaller has lots of warnings, so we need to run with ErrorAction = Continue # pyinstaller has lots of warnings, so we need to run with ErrorAction = Continue
$ErrorActionPreference = "Continue" $ErrorActionPreference = "Continue"

View File

@ -57,6 +57,7 @@ docker push docker/compose:$VERSION
echo "Uploading sdist to pypi" echo "Uploading sdist to pypi"
pandoc -f markdown -t rst README.md -o README.rst pandoc -f markdown -t rst README.md -o README.rst
sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst
./script/write-git-sha
python setup.py sdist python setup.py sdist
if [ "$(command -v twine 2> /dev/null)" ]; then if [ "$(command -v twine 2> /dev/null)" ]; then
twine upload ./dist/docker-compose-${VERSION}.tar.gz twine upload ./dist/docker-compose-${VERSION}.tar.gz

View File

@ -15,7 +15,7 @@
set -e set -e
VERSION="1.5.1" VERSION="1.5.2"
IMAGE="docker/compose:$VERSION" IMAGE="docker/compose:$VERSION"
@ -26,7 +26,7 @@ fi
if [ -S "$DOCKER_HOST" ]; then if [ -S "$DOCKER_HOST" ]; then
DOCKER_ADDR="-v $DOCKER_HOST:$DOCKER_HOST -e DOCKER_HOST" DOCKER_ADDR="-v $DOCKER_HOST:$DOCKER_HOST -e DOCKER_HOST"
else else
DOCKER_ADDR="-e DOCKER_HOST" DOCKER_ADDR="-e DOCKER_HOST -e DOCKER_TLS_VERIFY -e DOCKER_CERT_PATH"
fi fi

View File

@ -1,4 +1,6 @@
#!/usr/bin/env python #!/usr/bin/env python
from __future__ import print_function
import datetime import datetime
import os.path import os.path
import sys import sys
@ -6,4 +8,4 @@ import sys
os.environ['DATE'] = str(datetime.date.today()) os.environ['DATE'] = str(datetime.date.today())
for line in sys.stdin: for line in sys.stdin:
print os.path.expandvars(line), print(os.path.expandvars(line), end='')

7
script/write-git-sha Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
#
# Write the current commit sha to the file GITSHA. This file is included in
# packaging so that `docker-compose version` can include the git sha.
#
set -e
git rev-parse --short HEAD > compose/GITSHA

View File

@ -2,15 +2,20 @@ from __future__ import absolute_import
import os import os
import shlex import shlex
import signal
import subprocess import subprocess
import time
from collections import namedtuple from collections import namedtuple
from operator import attrgetter from operator import attrgetter
from docker import errors
from .. import mock from .. import mock
from compose.cli.command import get_project from compose.cli.command import get_project
from compose.cli.docker_client import docker_client from compose.cli.docker_client import docker_client
from compose.container import Container from compose.container import Container
from tests.integration.testcases import DockerClientTestCase from tests.integration.testcases import DockerClientTestCase
from tests.integration.testcases import pull_busybox
ProcessResult = namedtuple('ProcessResult', 'stdout stderr') ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
@ -20,6 +25,64 @@ BUILD_CACHE_TEXT = 'Using cache'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest' BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
def start_process(base_dir, options):
    """Spawn a ``docker-compose`` subprocess with *options* in *base_dir*.

    stdout and stderr are captured via pipes; the Popen object is returned
    so the caller can signal it and later collect its output.
    """
    argv = ['docker-compose'] + options
    child = subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=base_dir)
    print("Running process: %s" % child.pid)
    return child
def wait_on_process(proc, returncode=0):
    """Wait for *proc* to exit with *returncode* and return its output.

    When the exit code is unexpected, the captured stderr is printed before
    the assertion fires so test failures are debuggable.
    """
    out, err = proc.communicate()
    if proc.returncode != returncode:
        print(err.decode('utf-8'))
    assert proc.returncode == returncode
    return ProcessResult(out.decode('utf-8'), err.decode('utf-8'))
def wait_on_condition(condition, delay=0.1, timeout=5):
    """Poll *condition* until it is truthy, sleeping *delay* between polls.

    Raises AssertionError (naming the condition) once *timeout* seconds
    have elapsed without the condition becoming true.
    """
    deadline = time.time() + timeout
    while not condition():
        if time.time() > deadline:
            raise AssertionError("Timeout: %s" % condition)
        time.sleep(delay)
class ContainerCountCondition(object):
    """Condition (for wait_on_condition): the project has exactly
    ``expected`` running containers.
    """

    def __init__(self, project, expected):
        self.project = project
        self.expected = expected

    def __call__(self):
        # Only running containers are counted (containers() default).
        return len(self.project.containers()) == self.expected

    def __str__(self):
        # Fixed typo: "counter count" -> "container count". This message is
        # shown when the wait times out, so it should read correctly.
        return "waiting for container count == %s" % self.expected
class ContainerStateCondition(object):
    """Condition (for wait_on_condition): the named container's
    ``State.Running`` flag matches ``running``.
    """

    def __init__(self, client, name, running):
        self.client = client
        self.name = name
        self.running = running

    def __call__(self):
        try:
            container = self.client.inspect_container(self.name)
            return container['State']['Running'] == self.running
        except errors.APIError:
            # Container doesn't exist (yet, or anymore): condition not met.
            return False

    def __str__(self):
        # Bug fix: the original read self.expected, an attribute this class
        # never sets, so building the timeout message raised AttributeError.
        return "waiting for container to have state %s" % self.running
class CLITestCase(DockerClientTestCase): class CLITestCase(DockerClientTestCase):
def setUp(self): def setUp(self):
@ -42,17 +105,8 @@ class CLITestCase(DockerClientTestCase):
def dispatch(self, options, project_options=None, returncode=0): def dispatch(self, options, project_options=None, returncode=0):
project_options = project_options or [] project_options = project_options or []
proc = subprocess.Popen( proc = start_process(self.base_dir, project_options + options)
['docker-compose'] + project_options + options, return wait_on_process(proc, returncode=returncode)
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.base_dir)
print("Running process: %s" % proc.pid)
stdout, stderr = proc.communicate()
if proc.returncode != returncode:
print(stderr)
assert proc.returncode == returncode
return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))
def test_help(self): def test_help(self):
old_base_dir = self.base_dir old_base_dir = self.base_dir
@ -131,6 +185,8 @@ class CLITestCase(DockerClientTestCase):
assert BUILD_PULL_TEXT not in result.stdout assert BUILD_PULL_TEXT not in result.stdout
def test_build_pull(self): def test_build_pull(self):
# Make sure we have the latest busybox already
pull_busybox(self.client)
self.base_dir = 'tests/fixtures/simple-dockerfile' self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'], None) self.dispatch(['build', 'simple'], None)
@ -139,6 +195,8 @@ class CLITestCase(DockerClientTestCase):
assert BUILD_PULL_TEXT in result.stdout assert BUILD_PULL_TEXT in result.stdout
def test_build_no_cache_pull(self): def test_build_no_cache_pull(self):
# Make sure we have the latest busybox already
pull_busybox(self.client)
self.base_dir = 'tests/fixtures/simple-dockerfile' self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple']) self.dispatch(['build', 'simple'])
@ -291,7 +349,7 @@ class CLITestCase(DockerClientTestCase):
returncode=1) returncode=1)
def test_up_with_timeout(self): def test_up_with_timeout(self):
self.dispatch(['up', '-d', '-t', '1'], None) self.dispatch(['up', '-d', '-t', '1'])
service = self.project.get_service('simple') service = self.project.get_service('simple')
another = self.project.get_service('another') another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1) self.assertEqual(len(service.containers()), 1)
@ -303,6 +361,20 @@ class CLITestCase(DockerClientTestCase):
self.assertFalse(config['AttachStdout']) self.assertFalse(config['AttachStdout'])
self.assertFalse(config['AttachStdin']) self.assertFalse(config['AttachStdin'])
def test_up_handles_sigint(self):
    # Run `up` in the foreground, wait for both services' containers to
    # exist, then deliver SIGINT and verify everything is torn down.
    proc = start_process(self.base_dir, ['up', '-t', '2'])
    wait_on_condition(ContainerCountCondition(self.project, 2))
    os.kill(proc.pid, signal.SIGINT)
    wait_on_condition(ContainerCountCondition(self.project, 0))
def test_up_handles_sigterm(self):
    # Same as the SIGINT case above, but with SIGTERM: `up` must stop the
    # project's containers on either signal.
    proc = start_process(self.base_dir, ['up', '-t', '2'])
    wait_on_condition(ContainerCountCondition(self.project, 2))
    os.kill(proc.pid, signal.SIGTERM)
    wait_on_condition(ContainerCountCondition(self.project, 0))
def test_run_service_without_links(self): def test_run_service_without_links(self):
self.base_dir = 'tests/fixtures/links-composefile' self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', 'console', '/bin/true']) self.dispatch(['run', 'console', '/bin/true'])
@ -508,6 +580,32 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(networks), 1) self.assertEqual(len(networks), 1)
self.assertEqual(container.human_readable_command, u'true') self.assertEqual(container.human_readable_command, u'true')
def test_run_handles_sigint(self):
    # SIGINT delivered to a foreground `docker-compose run` must stop the
    # one-off container it started.
    proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=True))
    os.kill(proc.pid, signal.SIGINT)
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=False))
def test_run_handles_sigterm(self):
    # Same as the SIGINT case: SIGTERM to `docker-compose run` must stop
    # the one-off container it started.
    proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=True))
    os.kill(proc.pid, signal.SIGTERM)
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=False))
def test_rm(self): def test_rm(self):
service = self.project.get_service('simple') service = self.project.get_service('simple')
service.create_container() service.create_container()
@ -597,6 +695,15 @@ class CLITestCase(DockerClientTestCase):
started_at, started_at,
) )
def test_restart_stopped_container(self):
    # Regression test: `docker-compose restart` must also start containers
    # that are currently stopped, not only bounce running ones.
    service = self.project.get_service('simple')
    container = service.create_container()
    container.start()
    container.kill()
    self.assertEqual(len(service.containers(stopped=True)), 1)
    self.dispatch(['restart', '-t', '1'], None)
    self.assertEqual(len(service.containers(stopped=False)), 1)
def test_scale(self): def test_scale(self):
project = self.project project = self.project

View File

@ -3,12 +3,13 @@ from __future__ import unicode_literals
from .testcases import DockerClientTestCase from .testcases import DockerClientTestCase
from compose.cli.docker_client import docker_client from compose.cli.docker_client import docker_client
from compose.config import config from compose.config import config
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import LABEL_PROJECT from compose.const import LABEL_PROJECT
from compose.container import Container from compose.container import Container
from compose.project import Project from compose.project import Project
from compose.service import ConvergenceStrategy from compose.service import ConvergenceStrategy
from compose.service import Net from compose.service import Net
from compose.service import VolumeFromSpec
def build_service_dicts(service_config): def build_service_dicts(service_config):
@ -214,7 +215,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up(self): def test_project_up(self):
web = self.create_service('web') web = self.create_service('web')
db = self.create_service('db', volumes=['/var/db']) db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
project = Project('composetest', [web, db], self.client) project = Project('composetest', [web, db], self.client)
project.start() project.start()
self.assertEqual(len(project.containers()), 0) self.assertEqual(len(project.containers()), 0)
@ -238,7 +239,7 @@ class ProjectTest(DockerClientTestCase):
def test_recreate_preserves_volumes(self): def test_recreate_preserves_volumes(self):
web = self.create_service('web') web = self.create_service('web')
db = self.create_service('db', volumes=['/etc']) db = self.create_service('db', volumes=[VolumeSpec.parse('/etc')])
project = Project('composetest', [web, db], self.client) project = Project('composetest', [web, db], self.client)
project.start() project.start()
self.assertEqual(len(project.containers()), 0) self.assertEqual(len(project.containers()), 0)
@ -257,7 +258,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up_with_no_recreate_running(self): def test_project_up_with_no_recreate_running(self):
web = self.create_service('web') web = self.create_service('web')
db = self.create_service('db', volumes=['/var/db']) db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
project = Project('composetest', [web, db], self.client) project = Project('composetest', [web, db], self.client)
project.start() project.start()
self.assertEqual(len(project.containers()), 0) self.assertEqual(len(project.containers()), 0)
@ -277,7 +278,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up_with_no_recreate_stopped(self): def test_project_up_with_no_recreate_stopped(self):
web = self.create_service('web') web = self.create_service('web')
db = self.create_service('db', volumes=['/var/db']) db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
project = Project('composetest', [web, db], self.client) project = Project('composetest', [web, db], self.client)
project.start() project.start()
self.assertEqual(len(project.containers()), 0) self.assertEqual(len(project.containers()), 0)
@ -316,7 +317,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up_starts_links(self): def test_project_up_starts_links(self):
console = self.create_service('console') console = self.create_service('console')
db = self.create_service('db', volumes=['/var/db']) db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
web = self.create_service('web', links=[(db, 'db')]) web = self.create_service('web', links=[(db, 'db')])
project = Project('composetest', [web, db, console], self.client) project = Project('composetest', [web, db, console], self.client)

View File

@ -3,13 +3,17 @@ from __future__ import unicode_literals
from .. import mock from .. import mock
from .testcases import DockerClientTestCase from .testcases import DockerClientTestCase
from compose.config.types import VolumeSpec
from compose.project import Project from compose.project import Project
from compose.service import ConvergenceStrategy from compose.service import ConvergenceStrategy
class ResilienceTest(DockerClientTestCase): class ResilienceTest(DockerClientTestCase):
def setUp(self): def setUp(self):
self.db = self.create_service('db', volumes=['/var/db'], command='top') self.db = self.create_service(
'db',
volumes=[VolumeSpec.parse('/var/db')],
command='top')
self.project = Project('composetest', [self.db], self.client) self.project = Project('composetest', [self.db], self.client)
container = self.db.create_container() container = self.db.create_container()

View File

@ -14,6 +14,8 @@ from .. import mock
from .testcases import DockerClientTestCase from .testcases import DockerClientTestCase
from .testcases import pull_busybox from .testcases import pull_busybox
from compose import __version__ from compose import __version__
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import LABEL_CONFIG_HASH from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_CONTAINER_NUMBER from compose.const import LABEL_CONTAINER_NUMBER
from compose.const import LABEL_ONE_OFF from compose.const import LABEL_ONE_OFF
@ -21,13 +23,10 @@ from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION from compose.const import LABEL_VERSION
from compose.container import Container from compose.container import Container
from compose.service import build_extra_hosts
from compose.service import ConfigError
from compose.service import ConvergencePlan from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy from compose.service import ConvergenceStrategy
from compose.service import Net from compose.service import Net
from compose.service import Service from compose.service import Service
from compose.service import VolumeFromSpec
def create_and_start_container(service, **override_options): def create_and_start_container(service, **override_options):
@ -116,7 +115,7 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(container.name, 'composetest_db_run_1') self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_unspecified_volume(self): def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=['/var/db']) service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
container = service.create_container() container = service.create_container()
container.start() container.start()
self.assertIn('/var/db', container.get('Volumes')) self.assertIn('/var/db', container.get('Volumes'))
@ -133,37 +132,6 @@ class ServiceTest(DockerClientTestCase):
container.start() container.start()
self.assertEqual(container.get('HostConfig.CpuShares'), 73) self.assertEqual(container.get('HostConfig.CpuShares'), 73)
def test_build_extra_hosts(self):
# string
self.assertRaises(ConfigError, lambda: build_extra_hosts("www.example.com: 192.168.0.17"))
# list of strings
self.assertEqual(build_extra_hosts(
["www.example.com:192.168.0.17"]),
{'www.example.com': '192.168.0.17'})
self.assertEqual(build_extra_hosts(
["www.example.com: 192.168.0.17"]),
{'www.example.com': '192.168.0.17'})
self.assertEqual(build_extra_hosts(
["www.example.com: 192.168.0.17",
"static.example.com:192.168.0.19",
"api.example.com: 192.168.0.18"]),
{'www.example.com': '192.168.0.17',
'static.example.com': '192.168.0.19',
'api.example.com': '192.168.0.18'})
# list of dictionaries
self.assertRaises(ConfigError, lambda: build_extra_hosts(
[{'www.example.com': '192.168.0.17'},
{'api.example.com': '192.168.0.18'}]))
# dictionaries
self.assertEqual(build_extra_hosts(
{'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'}),
{'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'})
def test_create_container_with_extra_hosts_list(self): def test_create_container_with_extra_hosts_list(self):
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229'] extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts) service = self.create_service('db', extra_hosts=extra_hosts)
@ -209,7 +177,9 @@ class ServiceTest(DockerClientTestCase):
host_path = '/tmp/host-path' host_path = '/tmp/host-path'
container_path = '/container-path' container_path = '/container-path'
service = self.create_service('db', volumes=['%s:%s' % (host_path, container_path)]) service = self.create_service(
'db',
volumes=[VolumeSpec(host_path, container_path, 'rw')])
container = service.create_container() container = service.create_container()
container.start() container.start()
@ -222,11 +192,10 @@ class ServiceTest(DockerClientTestCase):
msg=("Last component differs: %s, %s" % (actual_host_path, host_path))) msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
def test_recreate_preserves_volume_with_trailing_slash(self): def test_recreate_preserves_volume_with_trailing_slash(self):
""" """When the Compose file specifies a trailing slash in the container path, make
When the Compose file specifies a trailing slash in the container path, make
sure we copy the volume over when recreating. sure we copy the volume over when recreating.
""" """
service = self.create_service('data', volumes=['/data/']) service = self.create_service('data', volumes=[VolumeSpec.parse('/data/')])
old_container = create_and_start_container(service) old_container = create_and_start_container(service)
volume_path = old_container.get('Volumes')['/data'] volume_path = old_container.get('Volumes')['/data']
@ -240,7 +209,7 @@ class ServiceTest(DockerClientTestCase):
""" """
host_path = '/tmp/data' host_path = '/tmp/data'
container_path = '/data' container_path = '/data'
volumes = ['{}:{}/'.format(host_path, container_path)] volumes = [VolumeSpec.parse('{}:{}/'.format(host_path, container_path))]
tmp_container = self.client.create_container( tmp_container = self.client.create_container(
'busybox', 'true', 'busybox', 'true',
@ -294,7 +263,7 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service( service = self.create_service(
'db', 'db',
environment={'FOO': '1'}, environment={'FOO': '1'},
volumes=['/etc'], volumes=[VolumeSpec.parse('/etc')],
entrypoint=['top'], entrypoint=['top'],
command=['-d', '1'] command=['-d', '1']
) )
@ -332,7 +301,7 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service( service = self.create_service(
'db', 'db',
environment={'FOO': '1'}, environment={'FOO': '1'},
volumes=['/var/db'], volumes=[VolumeSpec.parse('/var/db')],
entrypoint=['top'], entrypoint=['top'],
command=['-d', '1'] command=['-d', '1']
) )
@ -370,10 +339,8 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(new_container.get('Volumes')['/data'], volume_path) self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
def test_execute_convergence_plan_when_image_volume_masks_config(self): def test_execute_convergence_plan_when_image_volume_masks_config(self):
service = Service( service = self.create_service(
project='composetest', 'db',
name='db',
client=self.client,
build='tests/fixtures/dockerfile-with-volume', build='tests/fixtures/dockerfile-with-volume',
) )
@ -381,7 +348,7 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(list(old_container.get('Volumes').keys()), ['/data']) self.assertEqual(list(old_container.get('Volumes').keys()), ['/data'])
volume_path = old_container.get('Volumes')['/data'] volume_path = old_container.get('Volumes')['/data']
service.options['volumes'] = ['/tmp:/data'] service.options['volumes'] = [VolumeSpec.parse('/tmp:/data')]
with mock.patch('compose.service.log') as mock_log: with mock.patch('compose.service.log') as mock_log:
new_container, = service.execute_convergence_plan( new_container, = service.execute_convergence_plan(
@ -534,6 +501,13 @@ class ServiceTest(DockerClientTestCase):
self.create_service('web', build=text_type(base_dir)).build() self.create_service('web', build=text_type(base_dir)).build()
self.assertEqual(len(self.client.images(name='composetest_web')), 1) self.assertEqual(len(self.client.images(name='composetest_web')), 1)
def test_build_with_git_url(self):
    # `build` may point at a remote git repository URL instead of a local
    # path; building it should produce a usable image.
    # NOTE(review): requires network access to github.com.
    build_url = "https://github.com/dnephin/docker-build-from-url.git"
    service = self.create_service('buildwithurl', build=build_url)
    self.addCleanup(self.client.remove_image, service.image_name)
    service.build()
    assert service.image()
def test_start_container_stays_unpriviliged(self): def test_start_container_stays_unpriviliged(self):
service = self.create_service('web') service = self.create_service('web')
container = create_and_start_container(service).inspect() container = create_and_start_container(service).inspect()
@ -779,23 +753,21 @@ class ServiceTest(DockerClientTestCase):
container = create_and_start_container(service) container = create_and_start_container(service)
self.assertIsNone(container.get('HostConfig.Dns')) self.assertIsNone(container.get('HostConfig.Dns'))
def test_dns_single_value(self):
service = self.create_service('web', dns='8.8.8.8')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8'])
def test_dns_list(self): def test_dns_list(self):
service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9']) service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
container = create_and_start_container(service) container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9']) self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])
def test_restart_always_value(self): def test_restart_always_value(self):
service = self.create_service('web', restart='always') service = self.create_service('web', restart={'Name': 'always'})
container = create_and_start_container(service) container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always') self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')
def test_restart_on_failure_value(self): def test_restart_on_failure_value(self):
service = self.create_service('web', restart='on-failure:5') service = self.create_service('web', restart={
'Name': 'on-failure',
'MaximumRetryCount': 5
})
container = create_and_start_container(service) container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure') self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure')
self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5) self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5)
@ -810,17 +782,7 @@ class ServiceTest(DockerClientTestCase):
container = create_and_start_container(service) container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN']) self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN'])
def test_dns_search_no_value(self): def test_dns_search(self):
service = self.create_service('web')
container = create_and_start_container(service)
self.assertIsNone(container.get('HostConfig.DnsSearch'))
def test_dns_search_single_value(self):
service = self.create_service('web', dns_search='example.com')
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.DnsSearch'), ['example.com'])
def test_dns_search_list(self):
service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com']) service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
container = create_and_start_container(service) container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com']) self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com'])
@ -902,22 +864,11 @@ class ServiceTest(DockerClientTestCase):
for pair in expected.items(): for pair in expected.items():
self.assertIn(pair, labels) self.assertIn(pair, labels)
service.kill()
service.remove_stopped()
labels_list = ["%s=%s" % pair for pair in labels_dict.items()]
service = self.create_service('web', labels=labels_list)
labels = create_and_start_container(service).labels.items()
for pair in expected.items():
self.assertIn(pair, labels)
def test_empty_labels(self): def test_empty_labels(self):
labels_list = ['foo', 'bar'] labels_dict = {'foo': '', 'bar': ''}
service = self.create_service('web', labels=labels_dict)
service = self.create_service('web', labels=labels_list)
labels = create_and_start_container(service).labels.items() labels = create_and_start_container(service).labels.items()
for name in labels_list: for name in labels_dict:
self.assertIn((name, ''), labels) self.assertIn((name, ''), labels)
def test_custom_container_name(self): def test_custom_container_name(self):

View File

@ -1,25 +1,19 @@
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from docker import errors
from docker.utils import version_lt from docker.utils import version_lt
from pytest import skip from pytest import skip
from .. import unittest from .. import unittest
from compose.cli.docker_client import docker_client from compose.cli.docker_client import docker_client
from compose.config.config import process_service
from compose.config.config import resolve_environment from compose.config.config import resolve_environment
from compose.config.config import ServiceConfig
from compose.const import LABEL_PROJECT from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output from compose.progress_stream import stream_output
from compose.service import Service from compose.service import Service
def pull_busybox(client): def pull_busybox(client):
try: client.pull('busybox:latest', stream=False)
client.inspect_image('busybox:latest')
except errors.APIError:
client.pull('busybox:latest', stream=False)
class DockerClientTestCase(unittest.TestCase): class DockerClientTestCase(unittest.TestCase):
@ -44,13 +38,11 @@ class DockerClientTestCase(unittest.TestCase):
if 'command' not in kwargs: if 'command' not in kwargs:
kwargs['command'] = ["top"] kwargs['command'] = ["top"]
service_config = ServiceConfig('.', None, name, kwargs) kwargs['environment'] = resolve_environment(kwargs)
options = process_service(service_config) labels = dict(kwargs.setdefault('labels', {}))
options['environment'] = resolve_environment('.', kwargs)
labels = options.setdefault('labels', {})
labels['com.docker.compose.test-name'] = self.id() labels['com.docker.compose.test-name'] = self.id()
return Service(name, client=self.client, project='composetest', **options) return Service(name, client=self.client, project='composetest', **kwargs)
def check_build(self, *args, **kwargs): def check_build(self, *args, **kwargs):
kwargs.setdefault('rm', True) kwargs.setdefault('rm', True)

View File

@ -57,11 +57,11 @@ class CLIMainTestCase(unittest.TestCase):
with mock.patch('compose.cli.main.signal', autospec=True) as mock_signal: with mock.patch('compose.cli.main.signal', autospec=True) as mock_signal:
attach_to_logs(project, log_printer, service_names, timeout) attach_to_logs(project, log_printer, service_names, timeout)
mock_signal.signal.assert_called_once_with(mock_signal.SIGINT, mock.ANY) assert mock_signal.signal.mock_calls == [
mock.call(mock_signal.SIGINT, mock.ANY),
mock.call(mock_signal.SIGTERM, mock.ANY),
]
log_printer.run.assert_called_once_with() log_printer.run.assert_called_once_with()
project.stop.assert_called_once_with(
service_names=service_names,
timeout=timeout)
class SetupConsoleHandlerTestCase(unittest.TestCase): class SetupConsoleHandlerTestCase(unittest.TestCase):

View File

@ -124,7 +124,7 @@ class CLITestCase(unittest.TestCase):
mock_project.get_service.return_value = Service( mock_project.get_service.return_value = Service(
'service', 'service',
client=mock_client, client=mock_client,
restart='always', restart={'Name': 'always', 'MaximumRetryCount': 0},
image='someimage') image='someimage')
command.run(mock_project, { command.run(mock_project, {
'SERVICE': 'service', 'SERVICE': 'service',

View File

@ -10,7 +10,9 @@ import py
import pytest import pytest
from compose.config import config from compose.config import config
from compose.config.config import resolve_environment
from compose.config.errors import ConfigurationError from compose.config.errors import ConfigurationError
from compose.config.types import VolumeSpec
from compose.const import IS_WINDOWS_PLATFORM from compose.const import IS_WINDOWS_PLATFORM
from tests import mock from tests import mock
from tests import unittest from tests import unittest
@ -32,7 +34,7 @@ def service_sort(services):
return sorted(services, key=itemgetter('name')) return sorted(services, key=itemgetter('name'))
def build_config_details(contents, working_dir, filename): def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
return config.ConfigDetails( return config.ConfigDetails(
working_dir, working_dir,
[config.ConfigFile(filename, contents)]) [config.ConfigFile(filename, contents)])
@ -76,7 +78,7 @@ class ConfigTest(unittest.TestCase):
) )
) )
def test_config_invalid_service_names(self): def test_load_config_invalid_service_names(self):
for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']: for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
with pytest.raises(ConfigurationError) as exc: with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details( config.load(build_config_details(
@ -147,7 +149,7 @@ class ConfigTest(unittest.TestCase):
'name': 'web', 'name': 'web',
'build': '/', 'build': '/',
'links': ['db'], 'links': ['db'],
'volumes': ['/home/user/project:/code'], 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
}, },
{ {
'name': 'db', 'name': 'db',
@ -211,7 +213,7 @@ class ConfigTest(unittest.TestCase):
{ {
'name': 'web', 'name': 'web',
'image': 'example/web', 'image': 'example/web',
'volumes': ['/home/user/project:/code'], 'volumes': [VolumeSpec.parse('/home/user/project:/code')],
'labels': {'label': 'one'}, 'labels': {'label': 'one'},
}, },
] ]
@ -231,6 +233,27 @@ class ConfigTest(unittest.TestCase):
assert "service 'bogus' doesn't have any configuration" in exc.exconly() assert "service 'bogus' doesn't have any configuration" in exc.exconly()
assert "In file 'override.yaml'" in exc.exconly() assert "In file 'override.yaml'" in exc.exconly()
def test_load_sorts_in_dependency_order(self):
config_details = build_config_details({
'web': {
'image': 'busybox:latest',
'links': ['db'],
},
'db': {
'image': 'busybox:latest',
'volumes_from': ['volume:ro']
},
'volume': {
'image': 'busybox:latest',
'volumes': ['/tmp'],
}
})
services = config.load(config_details)
assert services[0]['name'] == 'volume'
assert services[1]['name'] == 'db'
assert services[2]['name'] == 'web'
def test_config_valid_service_names(self): def test_config_valid_service_names(self):
for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']: for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
services = config.load( services = config.load(
@ -240,29 +263,6 @@ class ConfigTest(unittest.TestCase):
'common.yml')) 'common.yml'))
assert services[0]['name'] == valid_name assert services[0]['name'] == valid_name
def test_config_invalid_ports_format_validation(self):
expected_error_msg = "Service 'web' configuration key 'ports' contains an invalid type"
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
for invalid_ports in [{"1": "8000"}, False, 0, "8000", 8000, ["8000", "8000"]]:
config.load(
build_config_details(
{'web': {'image': 'busybox', 'ports': invalid_ports}},
'working_dir',
'filename.yml'
)
)
def test_config_valid_ports_format_validation(self):
valid_ports = [["8000", "9000"], ["8000/8050"], ["8000"], [8000], ["49153-49154:3002-3003"]]
for ports in valid_ports:
config.load(
build_config_details(
{'web': {'image': 'busybox', 'ports': ports}},
'working_dir',
'filename.yml'
)
)
def test_config_hint(self): def test_config_hint(self):
expected_error_msg = "(did you mean 'privileged'?)" expected_error_msg = "(did you mean 'privileged'?)"
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg): with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
@ -512,6 +512,120 @@ class ConfigTest(unittest.TestCase):
assert 'line 3, column 32' in exc.exconly() assert 'line 3, column 32' in exc.exconly()
def test_validate_extra_hosts_invalid(self):
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details({
'web': {
'image': 'alpine',
'extra_hosts': "www.example.com: 192.168.0.17",
}
}))
assert "'extra_hosts' contains an invalid type" in exc.exconly()
def test_validate_extra_hosts_invalid_list(self):
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details({
'web': {
'image': 'alpine',
'extra_hosts': [
{'www.example.com': '192.168.0.17'},
{'api.example.com': '192.168.0.18'}
],
}
}))
assert "which is an invalid type" in exc.exconly()
class PortsTest(unittest.TestCase):
INVALID_PORTS_TYPES = [
{"1": "8000"},
False,
"8000",
8000,
]
NON_UNIQUE_SINGLE_PORTS = [
["8000", "8000"],
]
INVALID_PORT_MAPPINGS = [
["8000-8001:8000"],
]
VALID_SINGLE_PORTS = [
["8000"],
["8000/tcp"],
["8000", "9000"],
[8000],
[8000, 9000],
]
VALID_PORT_MAPPINGS = [
["8000:8050"],
["49153-49154:3002-3003"],
]
def test_config_invalid_ports_type_validation(self):
for invalid_ports in self.INVALID_PORTS_TYPES:
with pytest.raises(ConfigurationError) as exc:
self.check_config({'ports': invalid_ports})
assert "contains an invalid type" in exc.value.msg
def test_config_non_unique_ports_validation(self):
for invalid_ports in self.NON_UNIQUE_SINGLE_PORTS:
with pytest.raises(ConfigurationError) as exc:
self.check_config({'ports': invalid_ports})
assert "non-unique" in exc.value.msg
def test_config_invalid_ports_format_validation(self):
for invalid_ports in self.INVALID_PORT_MAPPINGS:
with pytest.raises(ConfigurationError) as exc:
self.check_config({'ports': invalid_ports})
assert "Port ranges don't match in length" in exc.value.msg
def test_config_valid_ports_format_validation(self):
for valid_ports in self.VALID_SINGLE_PORTS + self.VALID_PORT_MAPPINGS:
self.check_config({'ports': valid_ports})
def test_config_invalid_expose_type_validation(self):
for invalid_expose in self.INVALID_PORTS_TYPES:
with pytest.raises(ConfigurationError) as exc:
self.check_config({'expose': invalid_expose})
assert "contains an invalid type" in exc.value.msg
def test_config_non_unique_expose_validation(self):
for invalid_expose in self.NON_UNIQUE_SINGLE_PORTS:
with pytest.raises(ConfigurationError) as exc:
self.check_config({'expose': invalid_expose})
assert "non-unique" in exc.value.msg
def test_config_invalid_expose_format_validation(self):
# Valid port mappings ARE NOT valid 'expose' entries
for invalid_expose in self.INVALID_PORT_MAPPINGS + self.VALID_PORT_MAPPINGS:
with pytest.raises(ConfigurationError) as exc:
self.check_config({'expose': invalid_expose})
assert "should be of the format" in exc.value.msg
def test_config_valid_expose_format_validation(self):
# Valid single ports ARE valid 'expose' entries
for valid_expose in self.VALID_SINGLE_PORTS:
self.check_config({'expose': valid_expose})
def check_config(self, cfg):
config.load(
build_config_details(
{'web': dict(image='busybox', **cfg)},
'working_dir',
'filename.yml'
)
)
class InterpolationTest(unittest.TestCase): class InterpolationTest(unittest.TestCase):
@mock.patch.dict(os.environ) @mock.patch.dict(os.environ)
@ -603,14 +717,11 @@ class VolumeConfigTest(unittest.TestCase):
@mock.patch.dict(os.environ) @mock.patch.dict(os.environ)
def test_volume_binding_with_environment_variable(self): def test_volume_binding_with_environment_variable(self):
os.environ['VOLUME_PATH'] = '/host/path' os.environ['VOLUME_PATH'] = '/host/path'
d = config.load( d = config.load(build_config_details(
build_config_details( {'foo': {'build': '.', 'volumes': ['${VOLUME_PATH}:/container/path']}},
{'foo': {'build': '.', 'volumes': ['${VOLUME_PATH}:/container/path']}}, '.',
'.', ))[0]
None, self.assertEqual(d['volumes'], [VolumeSpec.parse('/host/path:/container/path')])
)
)[0]
self.assertEqual(d['volumes'], ['/host/path:/container/path'])
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths') @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
@mock.patch.dict(os.environ) @mock.patch.dict(os.environ)
@ -931,65 +1042,54 @@ class EnvTest(unittest.TestCase):
os.environ['FILE_DEF_EMPTY'] = 'E2' os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3' os.environ['ENV_DEF'] = 'E3'
service_dict = make_service_dict( service_dict = {
'foo', { 'build': '.',
'build': '.', 'environment': {
'environment': { 'FILE_DEF': 'F1',
'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '',
'FILE_DEF_EMPTY': '', 'ENV_DEF': None,
'ENV_DEF': None, 'NO_DEF': None
'NO_DEF': None
},
}, },
'tests/' }
)
self.assertEqual( self.assertEqual(
service_dict['environment'], resolve_environment(service_dict),
{'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}, {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''},
) )
def test_env_from_file(self): def test_resolve_environment_from_env_file(self):
service_dict = make_service_dict(
'foo',
{'build': '.', 'env_file': 'one.env'},
'tests/fixtures/env',
)
self.assertEqual( self.assertEqual(
service_dict['environment'], resolve_environment({'env_file': ['tests/fixtures/env/one.env']}),
{'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'}, {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'},
) )
def test_env_from_multiple_files(self): def test_resolve_environment_with_multiple_env_files(self):
service_dict = make_service_dict( service_dict = {
'foo', 'env_file': [
{'build': '.', 'env_file': ['one.env', 'two.env']}, 'tests/fixtures/env/one.env',
'tests/fixtures/env', 'tests/fixtures/env/two.env'
) ]
}
self.assertEqual( self.assertEqual(
service_dict['environment'], resolve_environment(service_dict),
{'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}, {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'},
) )
def test_env_nonexistent_file(self): def test_resolve_environment_nonexistent_file(self):
options = {'env_file': 'nonexistent.env'} with pytest.raises(ConfigurationError) as exc:
self.assertRaises( config.load(build_config_details(
ConfigurationError, {'foo': {'image': 'example', 'env_file': 'nonexistent.env'}},
lambda: make_service_dict('foo', options, 'tests/fixtures/env'), working_dir='tests/fixtures/env'))
)
assert 'Couldn\'t find env file' in exc.exconly()
assert 'nonexistent.env' in exc.exconly()
@mock.patch.dict(os.environ) @mock.patch.dict(os.environ)
def test_resolve_environment_from_file(self): def test_resolve_environment_from_env_file_with_empty_values(self):
os.environ['FILE_DEF'] = 'E1' os.environ['FILE_DEF'] = 'E1'
os.environ['FILE_DEF_EMPTY'] = 'E2' os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3' os.environ['ENV_DEF'] = 'E3'
service_dict = make_service_dict(
'foo',
{'build': '.', 'env_file': 'resolve.env'},
'tests/fixtures/env',
)
self.assertEqual( self.assertEqual(
service_dict['environment'], resolve_environment({'env_file': ['tests/fixtures/env/resolve.env']}),
{ {
'FILE_DEF': u'bär', 'FILE_DEF': u'bär',
'FILE_DEF_EMPTY': '', 'FILE_DEF_EMPTY': '',
@ -1008,19 +1108,21 @@ class EnvTest(unittest.TestCase):
build_config_details( build_config_details(
{'foo': {'build': '.', 'volumes': ['$HOSTENV:$CONTAINERENV']}}, {'foo': {'build': '.', 'volumes': ['$HOSTENV:$CONTAINERENV']}},
"tests/fixtures/env", "tests/fixtures/env",
None,
) )
)[0] )[0]
self.assertEqual(set(service_dict['volumes']), set(['/tmp:/host/tmp'])) self.assertEqual(
set(service_dict['volumes']),
set([VolumeSpec.parse('/tmp:/host/tmp')]))
service_dict = config.load( service_dict = config.load(
build_config_details( build_config_details(
{'foo': {'build': '.', 'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}}, {'foo': {'build': '.', 'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}},
"tests/fixtures/env", "tests/fixtures/env",
None,
) )
)[0] )[0]
self.assertEqual(set(service_dict['volumes']), set(['/opt/tmp:/opt/host/tmp'])) self.assertEqual(
set(service_dict['volumes']),
set([VolumeSpec.parse('/opt/tmp:/opt/host/tmp')]))
def load_from_filename(filename): def load_from_filename(filename):
@ -1267,8 +1369,14 @@ class ExtendsTest(unittest.TestCase):
dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml') dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml')
paths = [ paths = [
'%s:/foo' % os.path.abspath('tests/fixtures/volume-path/common/foo'), VolumeSpec(
'%s:/bar' % os.path.abspath('tests/fixtures/volume-path/bar'), os.path.abspath('tests/fixtures/volume-path/common/foo'),
'/foo',
'rw'),
VolumeSpec(
os.path.abspath('tests/fixtures/volume-path/bar'),
'/bar',
'rw')
] ]
self.assertEqual(set(dicts[0]['volumes']), set(paths)) self.assertEqual(set(dicts[0]['volumes']), set(paths))
@ -1317,6 +1425,70 @@ class ExtendsTest(unittest.TestCase):
}, },
])) ]))
def test_extends_with_environment_and_env_files(self):
tmpdir = py.test.ensuretemp('test_extends_with_environment')
self.addCleanup(tmpdir.remove)
commondir = tmpdir.mkdir('common')
commondir.join('base.yml').write("""
app:
image: 'example/app'
env_file:
- 'envs'
environment:
- SECRET
- TEST_ONE=common
- TEST_TWO=common
""")
tmpdir.join('docker-compose.yml').write("""
ext:
extends:
file: common/base.yml
service: app
env_file:
- 'envs'
environment:
- THING
- TEST_ONE=top
""")
commondir.join('envs').write("""
COMMON_ENV_FILE
TEST_ONE=common-env-file
TEST_TWO=common-env-file
TEST_THREE=common-env-file
TEST_FOUR=common-env-file
""")
tmpdir.join('envs').write("""
TOP_ENV_FILE
TEST_ONE=top-env-file
TEST_TWO=top-env-file
TEST_THREE=top-env-file
""")
expected = [
{
'name': 'ext',
'image': 'example/app',
'environment': {
'SECRET': 'secret',
'TOP_ENV_FILE': 'secret',
'COMMON_ENV_FILE': 'secret',
'THING': 'thing',
'TEST_ONE': 'top',
'TEST_TWO': 'common',
'TEST_THREE': 'top-env-file',
'TEST_FOUR': 'common-env-file',
},
},
]
with mock.patch.dict(os.environ):
os.environ['SECRET'] = 'secret'
os.environ['THING'] = 'thing'
os.environ['COMMON_ENV_FILE'] = 'secret'
os.environ['TOP_ENV_FILE'] = 'secret'
config = load_from_filename(str(tmpdir.join('docker-compose.yml')))
assert config == expected
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
class ExpandPathTest(unittest.TestCase): class ExpandPathTest(unittest.TestCase):
@ -1393,6 +1565,34 @@ class BuildPathTest(unittest.TestCase):
service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml') service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml')
self.assertEquals(service_dict, [{'name': 'foo', 'build': self.abs_context_path}]) self.assertEquals(service_dict, [{'name': 'foo', 'build': self.abs_context_path}])
def test_valid_url_in_build_path(self):
valid_urls = [
'git://github.com/docker/docker',
'git@github.com:docker/docker.git',
'git@bitbucket.org:atlassianlabs/atlassian-docker.git',
'https://github.com/docker/docker.git',
'http://github.com/docker/docker.git',
'github.com/docker/docker.git',
]
for valid_url in valid_urls:
service_dict = config.load(build_config_details({
'validurl': {'build': valid_url},
}, '.', None))
assert service_dict[0]['build'] == valid_url
def test_invalid_url_in_build_path(self):
invalid_urls = [
'example.com/bogus',
'ftp://example.com/',
'/path/does/not/exist',
]
for invalid_url in invalid_urls:
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details({
'invalidurl': {'build': invalid_url},
}, '.', None))
assert 'build path' in exc.exconly()
class GetDefaultConfigFilesTestCase(unittest.TestCase): class GetDefaultConfigFilesTestCase(unittest.TestCase):

View File

@ -1,6 +1,7 @@
from .. import unittest from compose.config.errors import DependencyError
from compose.project import DependencyError from compose.config.sort_services import sort_service_dicts
from compose.project import sort_service_dicts from compose.config.types import VolumeFromSpec
from tests import unittest
class SortServiceTest(unittest.TestCase): class SortServiceTest(unittest.TestCase):
@ -73,7 +74,7 @@ class SortServiceTest(unittest.TestCase):
}, },
{ {
'name': 'parent', 'name': 'parent',
'volumes_from': ['child'] 'volumes_from': [VolumeFromSpec('child', 'rw')]
}, },
{ {
'links': ['parent'], 'links': ['parent'],
@ -116,7 +117,7 @@ class SortServiceTest(unittest.TestCase):
}, },
{ {
'name': 'parent', 'name': 'parent',
'volumes_from': ['child'] 'volumes_from': [VolumeFromSpec('child', 'ro')]
}, },
{ {
'name': 'child' 'name': 'child'
@ -141,7 +142,7 @@ class SortServiceTest(unittest.TestCase):
}, },
{ {
'name': 'two', 'name': 'two',
'volumes_from': ['one'] 'volumes_from': [VolumeFromSpec('one', 'rw')]
}, },
{ {
'name': 'one' 'name': 'one'

View File

@ -0,0 +1,66 @@
import pytest
from compose.config.errors import ConfigurationError
from compose.config.types import parse_extra_hosts
from compose.config.types import VolumeSpec
from compose.const import IS_WINDOWS_PLATFORM
def test_parse_extra_hosts_list():
expected = {'www.example.com': '192.168.0.17'}
assert parse_extra_hosts(["www.example.com:192.168.0.17"]) == expected
expected = {'www.example.com': '192.168.0.17'}
assert parse_extra_hosts(["www.example.com: 192.168.0.17"]) == expected
assert parse_extra_hosts([
"www.example.com: 192.168.0.17",
"static.example.com:192.168.0.19",
"api.example.com: 192.168.0.18"
]) == {
'www.example.com': '192.168.0.17',
'static.example.com': '192.168.0.19',
'api.example.com': '192.168.0.18'
}
def test_parse_extra_hosts_dict():
assert parse_extra_hosts({
'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'
}) == {
'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'
}
class TestVolumeSpec(object):
def test_parse_volume_spec_only_one_path(self):
spec = VolumeSpec.parse('/the/volume')
assert spec == (None, '/the/volume', 'rw')
def test_parse_volume_spec_internal_and_external(self):
spec = VolumeSpec.parse('external:interval')
assert spec == ('external', 'interval', 'rw')
def test_parse_volume_spec_with_mode(self):
spec = VolumeSpec.parse('external:interval:ro')
assert spec == ('external', 'interval', 'ro')
spec = VolumeSpec.parse('external:interval:z')
assert spec == ('external', 'interval', 'z')
def test_parse_volume_spec_too_many_parts(self):
with pytest.raises(ConfigurationError) as exc:
VolumeSpec.parse('one:two:three:four')
assert 'has incorrect format' in exc.exconly()
@pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive')
def test_parse_volume_windows_absolute_path(self):
windows_path = "c:\\Users\\me\\Documents\\shiny\\config:\\opt\\shiny\\config:ro"
assert VolumeSpec.parse(windows_path) == (
"/c/Users/me/Documents/shiny/config",
"/opt/shiny/config",
"ro"
)

View File

@ -4,6 +4,7 @@ import docker
from .. import mock from .. import mock
from .. import unittest from .. import unittest
from compose.config.types import VolumeFromSpec
from compose.const import LABEL_SERVICE from compose.const import LABEL_SERVICE
from compose.container import Container from compose.container import Container
from compose.project import Project from compose.project import Project
@ -33,29 +34,6 @@ class ProjectTest(unittest.TestCase):
self.assertEqual(project.get_service('db').name, 'db') self.assertEqual(project.get_service('db').name, 'db')
self.assertEqual(project.get_service('db').options['image'], 'busybox:latest') self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
def test_from_dict_sorts_in_dependency_order(self):
project = Project.from_dicts('composetest', [
{
'name': 'web',
'image': 'busybox:latest',
'links': ['db'],
},
{
'name': 'db',
'image': 'busybox:latest',
'volumes_from': ['volume']
},
{
'name': 'volume',
'image': 'busybox:latest',
'volumes': ['/tmp'],
}
], None)
self.assertEqual(project.services[0].name, 'volume')
self.assertEqual(project.services[1].name, 'db')
self.assertEqual(project.services[2].name, 'web')
def test_from_config(self): def test_from_config(self):
dicts = [ dicts = [
{ {
@ -167,7 +145,7 @@ class ProjectTest(unittest.TestCase):
{ {
'name': 'test', 'name': 'test',
'image': 'busybox:latest', 'image': 'busybox:latest',
'volumes_from': ['aaa'] 'volumes_from': [VolumeFromSpec('aaa', 'rw')]
} }
], self.mock_client) ], self.mock_client)
self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id + ":rw"]) self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id + ":rw"])
@ -190,17 +168,13 @@ class ProjectTest(unittest.TestCase):
{ {
'name': 'test', 'name': 'test',
'image': 'busybox:latest', 'image': 'busybox:latest',
'volumes_from': ['vol'] 'volumes_from': [VolumeFromSpec('vol', 'rw')]
} }
], self.mock_client) ], self.mock_client)
self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name + ":rw"]) self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name + ":rw"])
@mock.patch.object(Service, 'containers') def test_use_volumes_from_service_container(self):
def test_use_volumes_from_service_container(self, mock_return):
container_ids = ['aabbccddee', '12345'] container_ids = ['aabbccddee', '12345']
mock_return.return_value = [
mock.Mock(id=container_id, spec=Container)
for container_id in container_ids]
project = Project.from_dicts('test', [ project = Project.from_dicts('test', [
{ {
@ -210,10 +184,16 @@ class ProjectTest(unittest.TestCase):
{ {
'name': 'test', 'name': 'test',
'image': 'busybox:latest', 'image': 'busybox:latest',
'volumes_from': ['vol'] 'volumes_from': [VolumeFromSpec('vol', 'rw')]
} }
], None) ], None)
self.assertEqual(project.get_service('test')._get_volumes_from(), [container_ids[0] + ':rw']) with mock.patch.object(Service, 'containers') as mock_return:
mock_return.return_value = [
mock.Mock(id=container_id, spec=Container)
for container_id in container_ids]
self.assertEqual(
project.get_service('test')._get_volumes_from(),
[container_ids[0] + ':rw'])
def test_net_unset(self): def test_net_unset(self):
project = Project.from_dicts('test', [ project = Project.from_dicts('test', [

View File

@ -2,11 +2,11 @@ from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
import docker import docker
import pytest
from .. import mock from .. import mock
from .. import unittest from .. import unittest
from compose.const import IS_WINDOWS_PLATFORM from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import LABEL_CONFIG_HASH from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_ONE_OFF from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT from compose.const import LABEL_PROJECT
@ -14,7 +14,6 @@ from compose.const import LABEL_SERVICE
from compose.container import Container from compose.container import Container
from compose.service import build_ulimits from compose.service import build_ulimits
from compose.service import build_volume_binding from compose.service import build_volume_binding
from compose.service import ConfigError
from compose.service import ContainerNet from compose.service import ContainerNet
from compose.service import get_container_data_volumes from compose.service import get_container_data_volumes
from compose.service import merge_volume_bindings from compose.service import merge_volume_bindings
@ -22,10 +21,9 @@ from compose.service import NeedsBuildError
from compose.service import Net from compose.service import Net
from compose.service import NoSuchImageError from compose.service import NoSuchImageError
from compose.service import parse_repository_tag from compose.service import parse_repository_tag
from compose.service import parse_volume_spec
from compose.service import Service from compose.service import Service
from compose.service import ServiceNet from compose.service import ServiceNet
from compose.service import VolumeFromSpec from compose.service import warn_on_masked_volume
class ServiceTest(unittest.TestCase): class ServiceTest(unittest.TestCase):
@ -33,11 +31,6 @@ class ServiceTest(unittest.TestCase):
def setUp(self): def setUp(self):
self.mock_client = mock.create_autospec(docker.Client) self.mock_client = mock.create_autospec(docker.Client)
def test_project_validation(self):
self.assertRaises(ConfigError, lambda: Service(name='foo', project='>', image='foo'))
Service(name='foo', project='bar.bar__', image='foo')
def test_containers(self): def test_containers(self):
service = Service('db', self.mock_client, 'myproject', image='foo') service = Service('db', self.mock_client, 'myproject', image='foo')
self.mock_client.containers.return_value = [] self.mock_client.containers.return_value = []
@ -427,6 +420,68 @@ class ServiceTest(unittest.TestCase):
} }
self.assertEqual(config_dict, expected) self.assertEqual(config_dict, expected)
def test_specifies_host_port_with_no_ports(self):
service = Service(
'foo',
image='foo')
self.assertEqual(service.specifies_host_port(), False)
def test_specifies_host_port_with_container_port(self):
service = Service(
'foo',
image='foo',
ports=["2000"])
self.assertEqual(service.specifies_host_port(), False)
def test_specifies_host_port_with_host_port(self):
service = Service(
'foo',
image='foo',
ports=["1000:2000"])
self.assertEqual(service.specifies_host_port(), True)
def test_specifies_host_port_with_host_ip_no_port(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1::2000"])
self.assertEqual(service.specifies_host_port(), False)
def test_specifies_host_port_with_host_ip_and_port(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1:1000:2000"])
self.assertEqual(service.specifies_host_port(), True)
def test_specifies_host_port_with_container_port_range(self):
service = Service(
'foo',
image='foo',
ports=["2000-3000"])
self.assertEqual(service.specifies_host_port(), False)
def test_specifies_host_port_with_host_port_range(self):
service = Service(
'foo',
image='foo',
ports=["1000-2000:2000-3000"])
self.assertEqual(service.specifies_host_port(), True)
def test_specifies_host_port_with_host_ip_no_port_range(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1::2000-3000"])
self.assertEqual(service.specifies_host_port(), False)
def test_specifies_host_port_with_host_ip_and_port_range(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1:1000-2000:2000-3000"])
self.assertEqual(service.specifies_host_port(), True)
def test_get_links_with_networking(self): def test_get_links_with_networking(self):
service = Service( service = Service(
'foo', 'foo',
@ -525,46 +580,12 @@ class ServiceVolumesTest(unittest.TestCase):
def setUp(self): def setUp(self):
self.mock_client = mock.create_autospec(docker.Client) self.mock_client = mock.create_autospec(docker.Client)
def test_parse_volume_spec_only_one_path(self):
spec = parse_volume_spec('/the/volume')
self.assertEqual(spec, (None, '/the/volume', 'rw'))
def test_parse_volume_spec_internal_and_external(self):
spec = parse_volume_spec('external:interval')
self.assertEqual(spec, ('external', 'interval', 'rw'))
def test_parse_volume_spec_with_mode(self):
spec = parse_volume_spec('external:interval:ro')
self.assertEqual(spec, ('external', 'interval', 'ro'))
spec = parse_volume_spec('external:interval:z')
self.assertEqual(spec, ('external', 'interval', 'z'))
def test_parse_volume_spec_too_many_parts(self):
with self.assertRaises(ConfigError):
parse_volume_spec('one:two:three:four')
@pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive')
def test_parse_volume_windows_absolute_path(self):
windows_absolute_path = "c:\\Users\\me\\Documents\\shiny\\config:\\opt\\shiny\\config:ro"
spec = parse_volume_spec(windows_absolute_path)
self.assertEqual(
spec,
(
"/c/Users/me/Documents/shiny/config",
"/opt/shiny/config",
"ro"
)
)
def test_build_volume_binding(self): def test_build_volume_binding(self):
binding = build_volume_binding(parse_volume_spec('/outside:/inside')) binding = build_volume_binding(VolumeSpec.parse('/outside:/inside'))
self.assertEqual(binding, ('/inside', '/outside:/inside:rw')) assert binding == ('/inside', '/outside:/inside:rw')
def test_get_container_data_volumes(self): def test_get_container_data_volumes(self):
options = [parse_volume_spec(v) for v in [ options = [VolumeSpec.parse(v) for v in [
'/host/volume:/host/volume:ro', '/host/volume:/host/volume:ro',
'/new/volume', '/new/volume',
'/existing/volume', '/existing/volume',
@ -588,19 +609,19 @@ class ServiceVolumesTest(unittest.TestCase):
}, has_been_inspected=True) }, has_been_inspected=True)
expected = [ expected = [
parse_volume_spec('/var/lib/docker/aaaaaaaa:/existing/volume:rw'), VolumeSpec.parse('/var/lib/docker/aaaaaaaa:/existing/volume:rw'),
parse_volume_spec('/var/lib/docker/cccccccc:/mnt/image/data:rw'), VolumeSpec.parse('/var/lib/docker/cccccccc:/mnt/image/data:rw'),
] ]
volumes = get_container_data_volumes(container, options) volumes = get_container_data_volumes(container, options)
self.assertEqual(sorted(volumes), sorted(expected)) assert sorted(volumes) == sorted(expected)
def test_merge_volume_bindings(self): def test_merge_volume_bindings(self):
options = [ options = [
'/host/volume:/host/volume:ro', VolumeSpec.parse('/host/volume:/host/volume:ro'),
'/host/rw/volume:/host/rw/volume', VolumeSpec.parse('/host/rw/volume:/host/rw/volume'),
'/new/volume', VolumeSpec.parse('/new/volume'),
'/existing/volume', VolumeSpec.parse('/existing/volume'),
] ]
self.mock_client.inspect_image.return_value = { self.mock_client.inspect_image.return_value = {
@ -626,8 +647,8 @@ class ServiceVolumesTest(unittest.TestCase):
'web', 'web',
image='busybox', image='busybox',
volumes=[ volumes=[
'/host/path:/data1', VolumeSpec.parse('/host/path:/data1'),
'/host/path:/data2', VolumeSpec.parse('/host/path:/data2'),
], ],
client=self.mock_client, client=self.mock_client,
) )
@ -656,7 +677,7 @@ class ServiceVolumesTest(unittest.TestCase):
service = Service( service = Service(
'web', 'web',
image='busybox', image='busybox',
volumes=['/host/path:/data'], volumes=[VolumeSpec.parse('/host/path:/data')],
client=self.mock_client, client=self.mock_client,
) )
@ -688,25 +709,53 @@ class ServiceVolumesTest(unittest.TestCase):
['/mnt/sda1/host/path:/data:rw'], ['/mnt/sda1/host/path:/data:rw'],
) )
def test_warn_on_masked_volume_no_warning_when_no_container_volumes(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
container_volumes = []
service = 'service_name'
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
assert not mock_log.warn.called
def test_warn_on_masked_volume_when_masked(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
container_volumes = [
VolumeSpec('/var/lib/docker/path', '/path', 'rw'),
VolumeSpec('/var/lib/docker/path', '/other', 'rw'),
]
service = 'service_name'
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
mock_log.warn.assert_called_once_with(mock.ANY)
def test_warn_on_masked_no_warning_with_same_path(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
container_volumes = [VolumeSpec('/home/user', '/path', 'rw')]
service = 'service_name'
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
assert not mock_log.warn.called
def test_create_with_special_volume_mode(self): def test_create_with_special_volume_mode(self):
self.mock_client.inspect_image.return_value = {'Id': 'imageid'} self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
create_calls = [] self.mock_client.create_container.return_value = {'Id': 'containerid'}
def create_container(*args, **kwargs):
create_calls.append((args, kwargs))
return {'Id': 'containerid'}
self.mock_client.create_container = create_container
volumes = ['/tmp:/foo:z']
volume = '/tmp:/foo:z'
Service( Service(
'web', 'web',
client=self.mock_client, client=self.mock_client,
image='busybox', image='busybox',
volumes=volumes, volumes=[VolumeSpec.parse(volume)],
).create_container() ).create_container()
self.assertEqual(len(create_calls), 1) assert self.mock_client.create_container.call_count == 1
self.assertEqual(self.mock_client.create_host_config.call_args[1]['binds'], volumes) self.assertEqual(
self.mock_client.create_host_config.call_args[1]['binds'],
[volume])

View File

@ -1,25 +1,21 @@
# encoding: utf-8 # encoding: utf-8
from __future__ import unicode_literals from __future__ import unicode_literals
from .. import unittest
from compose import utils from compose import utils
class JsonSplitterTestCase(unittest.TestCase): class TestJsonSplitter(object):
def test_json_splitter_no_object(self): def test_json_splitter_no_object(self):
data = '{"foo": "bar' data = '{"foo": "bar'
self.assertEqual(utils.json_splitter(data), (None, None)) assert utils.json_splitter(data) is None
def test_json_splitter_with_object(self): def test_json_splitter_with_object(self):
data = '{"foo": "bar"}\n \n{"next": "obj"}' data = '{"foo": "bar"}\n \n{"next": "obj"}'
self.assertEqual( assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
utils.json_splitter(data),
({'foo': 'bar'}, '{"next": "obj"}')
)
class StreamAsTextTestCase(unittest.TestCase): class TestStreamAsText(object):
def test_stream_with_non_utf_unicode_character(self): def test_stream_with_non_utf_unicode_character(self):
stream = [b'\xed\xf3\xf3'] stream = [b'\xed\xf3\xf3']
@ -30,3 +26,19 @@ class StreamAsTextTestCase(unittest.TestCase):
stream = ['ěĝ'.encode('utf-8')] stream = ['ěĝ'.encode('utf-8')]
output, = utils.stream_as_text(stream) output, = utils.stream_as_text(stream)
assert output == 'ěĝ' assert output == 'ěĝ'
class TestJsonStream(object):
def test_with_falsy_entries(self):
stream = [
'{"one": "two"}\n{}\n',
"[1, 2, 3]\n[]\n",
]
output = list(utils.json_stream(stream))
assert output == [
{'one': 'two'},
{},
[1, 2, 3],
[],
]