diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index db2b6506b..0e7b9d5f3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,7 +10,7 @@
     - id: end-of-file-fixer
     - id: flake8
     - id: name-tests-test
-      exclude: 'tests/integration/testcases.py'
+      exclude: 'tests/(integration/testcases\.py|helpers\.py)'
     - id: requirements-txt-fixer
     - id: trailing-whitespace
 - repo: git://github.com/asottile/reorder_python_imports
diff --git a/.travis.yml b/.travis.yml
index 3bb365a14..fbf269646 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,3 +25,5 @@ deploy:
   key: '$BINTRAY_API_KEY'
   file: ./bintray.json
   skip_cleanup: true
+  on:
+    all_branches: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8b93087f0..8ee45386a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,94 @@
 Change log
 ==========
 
+1.7.0 (2016-04-13)
+------------------
+
+**Breaking Changes**
+
+- `docker-compose logs` no longer follows log output by default. It now
+  matches the behaviour of `docker logs` and exits after the current logs
+  are printed. Use `-f` to get the old default behaviour.
+
+- Booleans are no longer allowed as values for mappings in the Compose file
+  (for keys `environment`, `labels` and `extra_hosts`). Previously this
+  was a warning. Boolean values should be quoted so they become string values.
+
+**New Features**
+
+- Compose now looks for a `.env` file in the directory where it's run and
+  reads any environment variables defined inside, if they're not already
+  set in the shell environment. This lets you easily set defaults for
+  variables used in the Compose file, or for any of the `COMPOSE_*` or
+  `DOCKER_*` variables.
+
+- Added a `--remove-orphans` flag to both `docker-compose up` and
+  `docker-compose down` to remove containers for services that were removed
+  from the Compose file.
+
+- Added a `--all` flag to `docker-compose rm` to include containers created
+  by `docker-compose run`. This will become the default behavior in the next
+  version of Compose.
+
+- Added support for all the same TLS configuration flags used by the `docker`
+  client: `--tls`, `--tlscert`, `--tlskey`, etc.
+
+- Compose files now support the `tmpfs` and `shm_size` options.
+
+- Added the `--workdir` flag to `docker-compose run`.
+
+- `docker-compose logs` now shows logs for new containers that are created
+  after it starts.
+
+- The `COMPOSE_FILE` environment variable can now contain multiple files,
+  separated by the host system's standard path separator (`:` on Mac/Linux,
+  `;` on Windows).
+
+- You can now specify a static IP address when connecting a service to a
+  network with the `ipv4_address` and `ipv6_address` options.
+
+- Added `--follow`, `--timestamps`, and `--tail` flags to the
+  `docker-compose logs` command.
+
+- `docker-compose up` and `docker-compose start` will now start containers
+  in parallel where possible.
+
+- `docker-compose stop` now stops containers in reverse dependency order
+  instead of all at once.
+
+- Added the `--build` flag to `docker-compose up` to force it to build a new
+  image. It now shows a warning if an image is automatically built when the
+  flag is not used.
+
+- Added the `docker-compose exec` command for executing a process in a running
+  container.
+
+
+**Bug Fixes**
+
+- `docker-compose down` now removes containers created by
+  `docker-compose run`.
+
+- A more appropriate error is shown when a timeout is hit during `up` when
+  using a tty.
+
+- Fixed a bug in `docker-compose down` where it would abort if some resources
+  had already been removed.
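The `.env` and `COMPOSE_FILE` behaviour described in the entries above is implemented later in this diff (via `compose/config/environment.py` and `get_config_path_from_options` in `compose/cli/command.py`). As a rough illustration only, here is a minimal sketch of the lookup order and path splitting; `load_env_file` is a hypothetical helper, not code added by this change:

import os


def load_env_file(project_dir):
    # Hypothetical helper: parse KEY=VALUE lines from <project_dir>/.env.
    values = {}
    env_path = os.path.join(project_dir, '.env')
    if os.path.exists(env_path):
        with open(env_path) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#') and '=' in line:
                    key, _, value = line.partition('=')
                    values[key.strip()] = value.strip()
    # Variables already set in the shell take priority over the .env file.
    values.update(os.environ)
    return values


env = load_env_file('.')
compose_file = env.get('COMPOSE_FILE')
if compose_file:
    # os.pathsep is ':' on Mac/Linux and ';' on Windows.
    config_paths = compose_file.split(os.pathsep)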
+
+- Fixed a bug where changes to network aliases would not trigger a service
+  to be recreated.
+
+- Fixed a bug where a log message was printed about creating a new volume
+  when it already existed.
+
+- Fixed a bug where interrupting `up` would not always shut down containers.
+
+- Fixed a bug where `log_opt` and `log_driver` were not properly carried over
+  when extending services in the v1 Compose file format.
+
+- Fixed a bug where empty values for build args would cause file validation
+  to fail.
+
 1.6.2 (2016-02-23)
 ------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 66224752d..50e58ddca 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -50,22 +50,22 @@ See Docker's [basic contribution workflow](https://docs.docker.com/opensource/wo
 Use the test script to run linting checks and then the full test suite against
 different Python interpreters:
 
-    $ script/test
+    $ script/test/default
 
 Tests are run against a Docker daemon inside a container, so that we can test
 against multiple Docker versions. By default they'll run against only the latest
 Docker version - set the `DOCKER_VERSIONS` environment variable to "all" to run
 against all supported versions:
 
-    $ DOCKER_VERSIONS=all script/test
+    $ DOCKER_VERSIONS=all script/test/default
 
-Arguments to `script/test` are passed through to the `nosetests` executable, so
+Arguments to `script/test/default` are passed through to the `tox` executable, so
 you can specify a test directory, file, module, class or method:
 
-    $ script/test tests/unit
-    $ script/test tests/unit/cli_test.py
-    $ script/test tests/unit/config_test.py::ConfigTest
-    $ script/test tests/unit/config_test.py::ConfigTest::test_load
+    $ script/test/default tests/unit
+    $ script/test/default tests/unit/cli_test.py
+    $ script/test/default tests/unit/config_test.py::ConfigTest
+    $ script/test/default tests/unit/config_test.py::ConfigTest::test_load
 
 ## Finding things to work on
 
diff --git a/appveyor.yml b/appveyor.yml
index b162db1e3..e4f39544a 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -9,20 +9,16 @@ install:
 # Build the binary after tests
 build: false
 
-environment:
-  BINTRAY_USER: "docker-compose-roleuser"
-  BINTRAY_PATH: "docker-compose/master/windows/master/docker-compose-Windows-x86_64.exe"
-
 test_script:
     - "tox -e py27,py34 -- tests/unit"
-    - ps: ".\\script\\build-windows.ps1"
-
-deploy_script:
-  - "curl -sS
-    -u \"%BINTRAY_USER%:%BINTRAY_API_KEY%\"
-    -X PUT \"https://api.bintray.com/content/%BINTRAY_PATH%?override=1&publish=1\"
-    --data-binary @dist\\docker-compose-Windows-x86_64.exe"
+    - ps: ".\\script\\build\\windows.ps1"
 
 artifacts:
     - path: .\dist\docker-compose-Windows-x86_64.exe
      name: "Compose Windows binary"
+
+deploy:
+  - provider: Environment
+    name: master-builds
+    on:
+      branch: master
diff --git a/compose/__init__.py b/compose/__init__.py
index 83b4c7e6a..b2062199a 100644
--- a/compose/__init__.py
+++ b/compose/__init__.py
@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.6.2'
+__version__ = '1.7.0'
diff --git a/compose/cli/command.py b/compose/cli/command.py
index 2a0d86984..b7160deec 100644
--- a/compose/cli/command.py
+++ b/compose/cli/command.py
@@ -1,69 +1,53 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-import contextlib
 import logging
 import os
 import re
 
 import six
-from requests.exceptions import ConnectionError
-from requests.exceptions import SSLError
 
-from . import errors
 from . import verbose_proxy
 from ..
import config +from ..config.environment import Environment from ..const import API_VERSIONS from ..project import Project from .docker_client import docker_client -from .utils import call_silently +from .docker_client import tls_config_from_options from .utils import get_version_info -from .utils import is_mac -from .utils import is_ubuntu log = logging.getLogger(__name__) -@contextlib.contextmanager -def friendly_error_message(): - try: - yield - except SSLError as e: - raise errors.UserError('SSL error: %s' % e) - except ConnectionError: - if call_silently(['which', 'docker']) != 0: - if is_mac(): - raise errors.DockerNotFoundMac() - elif is_ubuntu(): - raise errors.DockerNotFoundUbuntu() - else: - raise errors.DockerNotFoundGeneric() - elif call_silently(['which', 'docker-machine']) == 0: - raise errors.ConnectionErrorDockerMachine() - else: - raise errors.ConnectionErrorGeneric(get_client().base_url) - - -def project_from_options(base_dir, options): +def project_from_options(project_dir, options): + environment = Environment.from_env_file(project_dir) return get_project( - base_dir, - get_config_path_from_options(options), + project_dir, + get_config_path_from_options(project_dir, options, environment), project_name=options.get('--project-name'), verbose=options.get('--verbose'), + host=options.get('--host'), + tls_config=tls_config_from_options(options), + environment=environment ) -def get_config_path_from_options(options): +def get_config_path_from_options(base_dir, options, environment): file_option = options.get('--file') if file_option: return file_option - config_file = os.environ.get('COMPOSE_FILE') - return [config_file] if config_file else None + config_files = environment.get('COMPOSE_FILE') + if config_files: + return config_files.split(os.pathsep) + return None -def get_client(verbose=False, version=None): - client = docker_client(version=version) +def get_client(environment, verbose=False, version=None, tls_config=None, host=None): + client = docker_client( + version=version, tls_config=tls_config, host=host, + environment=environment + ) if verbose: version_info = six.iteritems(client.version()) log.info(get_version_info('full')) @@ -74,24 +58,34 @@ def get_client(verbose=False, version=None): return client -def get_project(base_dir, config_path=None, project_name=None, verbose=False): - config_details = config.find(base_dir, config_path) - project_name = get_project_name(config_details.working_dir, project_name) +def get_project(project_dir, config_path=None, project_name=None, verbose=False, + host=None, tls_config=None, environment=None): + if not environment: + environment = Environment.from_env_file(project_dir) + config_details = config.find(project_dir, config_path, environment) + project_name = get_project_name( + config_details.working_dir, project_name, environment + ) config_data = config.load(config_details) - api_version = os.environ.get( + api_version = environment.get( 'COMPOSE_API_VERSION', API_VERSIONS[config_data.version]) - client = get_client(verbose=verbose, version=api_version) + client = get_client( + verbose=verbose, version=api_version, tls_config=tls_config, + host=host, environment=environment + ) return Project.from_config(project_name, config_data, client) -def get_project_name(working_dir, project_name=None): +def get_project_name(working_dir, project_name=None, environment=None): def normalize_name(name): return re.sub(r'[^a-z0-9]', '', name.lower()) - project_name = project_name or os.environ.get('COMPOSE_PROJECT_NAME') + if not 
environment: + environment = Environment.from_env_file(working_dir) + project_name = project_name or environment.get('COMPOSE_PROJECT_NAME') if project_name: return normalize_name(project_name) diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py index b680616ef..0c0113bb7 100644 --- a/compose/cli/docker_client.py +++ b/compose/cli/docker_client.py @@ -2,10 +2,10 @@ from __future__ import absolute_import from __future__ import unicode_literals import logging -import os from docker import Client from docker.errors import TLSParameterError +from docker.tls import TLSConfig from docker.utils import kwargs_from_env from ..const import HTTP_TIMEOUT @@ -14,24 +14,60 @@ from .errors import UserError log = logging.getLogger(__name__) -def docker_client(version=None): +def tls_config_from_options(options): + tls = options.get('--tls', False) + ca_cert = options.get('--tlscacert') + cert = options.get('--tlscert') + key = options.get('--tlskey') + verify = options.get('--tlsverify') + skip_hostname_check = options.get('--skip-hostname-check', False) + + advanced_opts = any([ca_cert, cert, key, verify]) + + if tls is True and not advanced_opts: + return True + elif advanced_opts: # --tls is a noop + client_cert = None + if cert or key: + client_cert = (cert, key) + + return TLSConfig( + client_cert=client_cert, verify=verify, ca_cert=ca_cert, + assert_hostname=False if skip_hostname_check else None + ) + + return None + + +def docker_client(environment, version=None, tls_config=None, host=None): """ Returns a docker-py client configured using environment variables according to the same logic as the official Docker client. """ - if 'DOCKER_CLIENT_TIMEOUT' in os.environ: - log.warn('The DOCKER_CLIENT_TIMEOUT environment variable is deprecated. Please use COMPOSE_HTTP_TIMEOUT instead.') + if 'DOCKER_CLIENT_TIMEOUT' in environment: + log.warn("The DOCKER_CLIENT_TIMEOUT environment variable is deprecated. 
" + "Please use COMPOSE_HTTP_TIMEOUT instead.") try: - kwargs = kwargs_from_env(assert_hostname=False) + kwargs = kwargs_from_env(environment=environment) except TLSParameterError: raise UserError( - 'TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY and DOCKER_CERT_PATH are set correctly.\n' - 'You might need to run `eval "$(docker-machine env default)"`') + "TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY " + "and DOCKER_CERT_PATH are set correctly.\n" + "You might need to run `eval \"$(docker-machine env default)\"`") + + if host: + kwargs['base_url'] = host + if tls_config: + kwargs['tls'] = tls_config if version: kwargs['version'] = version - kwargs['timeout'] = HTTP_TIMEOUT + timeout = environment.get('COMPOSE_HTTP_TIMEOUT') + if timeout: + kwargs['timeout'] = int(timeout) + else: + kwargs['timeout'] = HTTP_TIMEOUT return Client(**kwargs) diff --git a/compose/cli/docopt_command.py b/compose/cli/docopt_command.py index e3f4aa9e5..809a4b745 100644 --- a/compose/cli/docopt_command.py +++ b/compose/cli/docopt_command.py @@ -1,7 +1,6 @@ from __future__ import absolute_import from __future__ import unicode_literals -import sys from inspect import getdoc from docopt import docopt @@ -15,24 +14,21 @@ def docopt_full_help(docstring, *args, **kwargs): raise SystemExit(docstring) -class DocoptCommand(object): - def docopt_options(self): - return {'options_first': True} +class DocoptDispatcher(object): - def sys_dispatch(self): - self.dispatch(sys.argv[1:], None) + def __init__(self, command_class, options): + self.command_class = command_class + self.options = options - def dispatch(self, argv, global_options): - self.perform_command(*self.parse(argv, global_options)) - - def parse(self, argv, global_options): - options = docopt_full_help(getdoc(self), argv, **self.docopt_options()) + def parse(self, argv): + command_help = getdoc(self.command_class) + options = docopt_full_help(command_help, argv, **self.options) command = options['COMMAND'] if command is None: - raise SystemExit(getdoc(self)) + raise SystemExit(command_help) - handler = self.get_handler(command) + handler = get_handler(self.command_class, command) docstring = getdoc(handler) if docstring is None: @@ -41,13 +37,18 @@ class DocoptCommand(object): command_options = docopt_full_help(docstring, options['ARGS'], options_first=True) return options, handler, command_options - def get_handler(self, command): - command = command.replace('-', '_') - if not hasattr(self, command): - raise NoSuchCommand(command, self) +def get_handler(command_class, command): + command = command.replace('-', '_') + # we certainly want to have "exec" command, since that's what docker client has + # but in python exec is a keyword + if command == "exec": + command = "exec_command" - return getattr(self, command) + if not hasattr(command_class, command): + raise NoSuchCommand(command, command_class) + + return getattr(command_class, command) class NoSuchCommand(Exception): diff --git a/compose/cli/errors.py b/compose/cli/errors.py index 03d6a50c6..2c68d36db 100644 --- a/compose/cli/errors.py +++ b/compose/cli/errors.py @@ -1,10 +1,29 @@ from __future__ import absolute_import from __future__ import unicode_literals +import contextlib +import logging +import socket from textwrap import dedent +from docker.errors import APIError +from requests.exceptions import ConnectionError as RequestsConnectionError +from requests.exceptions import ReadTimeout +from requests.exceptions import SSLError +from requests.packages.urllib3.exceptions 
import ReadTimeoutError + +from ..const import API_VERSION_TO_ENGINE_VERSION +from ..const import HTTP_TIMEOUT +from .utils import call_silently +from .utils import is_mac +from .utils import is_ubuntu + + +log = logging.getLogger(__name__) + class UserError(Exception): + def __init__(self, msg): self.msg = dedent(msg).strip() @@ -14,44 +33,98 @@ class UserError(Exception): __str__ = __unicode__ -class DockerNotFoundMac(UserError): - def __init__(self): - super(DockerNotFoundMac, self).__init__(""" - Couldn't connect to Docker daemon. You might need to install docker-osx: - - https://github.com/noplay/docker-osx - """) +class ConnectionError(Exception): + pass -class DockerNotFoundUbuntu(UserError): - def __init__(self): - super(DockerNotFoundUbuntu, self).__init__(""" - Couldn't connect to Docker daemon. You might need to install Docker: +@contextlib.contextmanager +def handle_connection_errors(client): + try: + yield + except SSLError as e: + log.error('SSL error: %s' % e) + raise ConnectionError() + except RequestsConnectionError as e: + if e.args and isinstance(e.args[0], ReadTimeoutError): + log_timeout_error() + raise ConnectionError() - https://docs.docker.com/engine/installation/ubuntulinux/ - """) + if call_silently(['which', 'docker']) != 0: + if is_mac(): + exit_with_error(docker_not_found_mac) + if is_ubuntu(): + exit_with_error(docker_not_found_ubuntu) + exit_with_error(docker_not_found_generic) + if call_silently(['which', 'docker-machine']) == 0: + exit_with_error(conn_error_docker_machine) + exit_with_error(conn_error_generic.format(url=client.base_url)) + except APIError as e: + log_api_error(e, client.api_version) + raise ConnectionError() + except (ReadTimeout, socket.timeout) as e: + log_timeout_error() + raise ConnectionError() -class DockerNotFoundGeneric(UserError): - def __init__(self): - super(DockerNotFoundGeneric, self).__init__(""" - Couldn't connect to Docker daemon. You might need to install Docker: - - https://docs.docker.com/engine/installation/ - """) +def log_timeout_error(): + log.error( + "An HTTP request took too long to complete. Retry with --verbose to " + "obtain debug information.\n" + "If you encounter this issue regularly because of slow network " + "conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher " + "value (current value: %s)." % HTTP_TIMEOUT) -class ConnectionErrorDockerMachine(UserError): - def __init__(self): - super(ConnectionErrorDockerMachine, self).__init__(""" - Couldn't connect to Docker daemon - you might need to run `docker-machine start default`. - """) +def log_api_error(e, client_version): + if b'client is newer than server' not in e.explanation: + log.error(e.explanation) + return + + version = API_VERSION_TO_ENGINE_VERSION.get(client_version) + if not version: + # They've set a custom API version + log.error(e.explanation) + return + + log.error( + "The Docker Engine version is less than the minimum required by " + "Compose. Your current project requires a Docker Engine of " + "version {version} or greater.".format(version=version)) -class ConnectionErrorGeneric(UserError): - def __init__(self, url): - super(ConnectionErrorGeneric, self).__init__(""" - Couldn't connect to Docker daemon at %s - is it running? +def exit_with_error(msg): + log.error(dedent(msg).strip()) + raise ConnectionError() - If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable. - """ % url) + +docker_not_found_mac = """ + Couldn't connect to Docker daemon. 
You might need to install Docker: + + https://docs.docker.com/engine/installation/mac/ +""" + + +docker_not_found_ubuntu = """ + Couldn't connect to Docker daemon. You might need to install Docker: + + https://docs.docker.com/engine/installation/ubuntulinux/ +""" + + +docker_not_found_generic = """ + Couldn't connect to Docker daemon. You might need to install Docker: + + https://docs.docker.com/engine/installation/ +""" + + +conn_error_docker_machine = """ + Couldn't connect to Docker daemon - you might need to run `docker-machine start default`. +""" + + +conn_error_generic = """ + Couldn't connect to Docker daemon at {url} - is it running? + + If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable. +""" diff --git a/compose/cli/log_printer.py b/compose/cli/log_printer.py index 85fef794f..b48462ff5 100644 --- a/compose/cli/log_printer.py +++ b/compose/cli/log_printer.py @@ -2,60 +2,152 @@ from __future__ import absolute_import from __future__ import unicode_literals import sys +from collections import namedtuple from itertools import cycle +from threading import Thread + +from six.moves import _thread as thread +from six.moves.queue import Empty +from six.moves.queue import Queue from . import colors -from .multiplexer import Multiplexer from compose import utils +from compose.cli.signals import ShutdownException from compose.utils import split_buffer -class LogPrinter(object): - """Print logs from many containers to a single output stream.""" +class LogPresenter(object): - def __init__(self, containers, output=sys.stdout, monochrome=False, cascade_stop=False): - self.containers = containers - self.output = utils.get_output_stream(output) - self.monochrome = monochrome - self.cascade_stop = cascade_stop + def __init__(self, prefix_width, color_func): + self.prefix_width = prefix_width + self.color_func = color_func - def run(self): - if not self.containers: - return - - prefix_width = max_name_width(self.containers) - generators = list(self._make_log_generators(self.monochrome, prefix_width)) - for line in Multiplexer(generators, cascade_stop=self.cascade_stop).loop(): - self.output.write(line) - self.output.flush() - - def _make_log_generators(self, monochrome, prefix_width): - def no_color(text): - return text - - if monochrome: - color_funcs = cycle([no_color]) - else: - color_funcs = cycle(colors.rainbow()) - - for color_func, container in zip(color_funcs, self.containers): - generator_func = get_log_generator(container) - prefix = color_func(build_log_prefix(container, prefix_width)) - yield generator_func(container, prefix, color_func) + def present(self, container, line): + prefix = container.name_without_project.ljust(self.prefix_width) + return '{prefix} {line}'.format( + prefix=self.color_func(prefix + ' |'), + line=line) -def build_log_prefix(container, prefix_width): - return container.name_without_project.ljust(prefix_width) + ' | ' +def build_log_presenters(service_names, monochrome): + """Return an iterable of functions. + + Each function can be used to format the logs output of a container. 
+ """ + prefix_width = max_name_width(service_names) + + def no_color(text): + return text + + for color_func in cycle([no_color] if monochrome else colors.rainbow()): + yield LogPresenter(prefix_width, color_func) -def max_name_width(containers): +def max_name_width(service_names, max_index_width=3): """Calculate the maximum width of container names so we can make the log prefixes line up like so: db_1 | Listening web_1 | Listening """ - return max(len(container.name_without_project) for container in containers) + return max(len(name) for name in service_names) + max_index_width + + +class LogPrinter(object): + """Print logs from many containers to a single output stream.""" + + def __init__(self, + containers, + presenters, + event_stream, + output=sys.stdout, + cascade_stop=False, + log_args=None): + self.containers = containers + self.presenters = presenters + self.event_stream = event_stream + self.output = utils.get_output_stream(output) + self.cascade_stop = cascade_stop + self.log_args = log_args or {} + + def run(self): + if not self.containers: + return + + queue = Queue() + thread_args = queue, self.log_args + thread_map = build_thread_map(self.containers, self.presenters, thread_args) + start_producer_thread(( + thread_map, + self.event_stream, + self.presenters, + thread_args)) + + for line in consume_queue(queue, self.cascade_stop): + remove_stopped_threads(thread_map) + + if not line: + if not thread_map: + # There are no running containers left to tail, so exit + return + # We got an empty line because of a timeout, but there are still + # active containers to tail, so continue + continue + + self.output.write(line) + self.output.flush() + + +def remove_stopped_threads(thread_map): + for container_id, tailer_thread in list(thread_map.items()): + if not tailer_thread.is_alive(): + thread_map.pop(container_id, None) + + +def build_thread(container, presenter, queue, log_args): + tailer = Thread( + target=tail_container_logs, + args=(container, presenter, queue, log_args)) + tailer.daemon = True + tailer.start() + return tailer + + +def build_thread_map(initial_containers, presenters, thread_args): + return { + container.id: build_thread(container, next(presenters), *thread_args) + for container in initial_containers + } + + +class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')): + + @classmethod + def new(cls, item): + return cls(item, None, None) + + @classmethod + def exception(cls, exc): + return cls(None, None, exc) + + @classmethod + def stop(cls): + return cls(None, True, None) + + +def tail_container_logs(container, presenter, queue, log_args): + generator = get_log_generator(container) + + try: + for item in generator(container, log_args): + queue.put(QueueItem.new(presenter.present(container, item))) + except Exception as e: + queue.put(QueueItem.exception(e)) + return + + if log_args.get('follow'): + queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container)))) + queue.put(QueueItem.stop()) def get_log_generator(container): @@ -64,30 +156,75 @@ def get_log_generator(container): return build_no_log_generator -def build_no_log_generator(container, prefix, color_func): +def build_no_log_generator(container, log_args): """Return a generator that prints a warning about logs and waits for container to exit. 
""" - yield "{} WARNING: no logs are available with the '{}' log driver\n".format( - prefix, + yield "WARNING: no logs are available with the '{}' log driver\n".format( container.log_driver) - yield color_func(wait_on_exit(container)) -def build_log_generator(container, prefix, color_func): +def build_log_generator(container, log_args): # if the container doesn't have a log_stream we need to attach to container # before log printer starts running if container.log_stream is None: - stream = container.attach(stdout=True, stderr=True, stream=True, logs=True) - line_generator = split_buffer(stream) + stream = container.logs(stdout=True, stderr=True, stream=True, **log_args) else: - line_generator = split_buffer(container.log_stream) + stream = container.log_stream - for line in line_generator: - yield prefix + line - yield color_func(wait_on_exit(container)) + return split_buffer(stream) def wait_on_exit(container): exit_code = container.wait() return "%s exited with code %s\n" % (container.name, exit_code) + + +def start_producer_thread(thread_args): + producer = Thread(target=watch_events, args=thread_args) + producer.daemon = True + producer.start() + + +def watch_events(thread_map, event_stream, presenters, thread_args): + for event in event_stream: + if event['action'] == 'stop': + thread_map.pop(event['id'], None) + + if event['action'] != 'start': + continue + + if event['id'] in thread_map: + if thread_map[event['id']].is_alive(): + continue + # Container was stopped and started, we need a new thread + thread_map.pop(event['id'], None) + + thread_map[event['id']] = build_thread( + event['container'], + next(presenters), + *thread_args) + + +def consume_queue(queue, cascade_stop): + """Consume the queue by reading lines off of it and yielding them.""" + while True: + try: + item = queue.get(timeout=0.1) + except Empty: + yield None + continue + # See https://github.com/docker/compose/issues/189 + except thread.error: + raise ShutdownException() + + if item.exc: + raise item.exc + + if item.is_stop: + if cascade_stop: + raise StopIteration + else: + continue + + yield item.item diff --git a/compose/cli/main.py b/compose/cli/main.py index 3c4b5721d..8348b8c37 100644 --- a/compose/cli/main.py +++ b/compose/cli/main.py @@ -3,6 +3,7 @@ from __future__ import print_function from __future__ import unicode_literals import contextlib +import functools import json import logging import re @@ -10,63 +11,56 @@ import sys from inspect import getdoc from operator import attrgetter -from docker.errors import APIError -from requests.exceptions import ReadTimeout - +from . import errors from . import signals from .. 
import __version__ from ..config import config from ..config import ConfigurationError from ..config import parse_environment +from ..config.environment import Environment from ..config.serialize import serialize_config -from ..const import API_VERSION_TO_ENGINE_VERSION from ..const import DEFAULT_TIMEOUT -from ..const import HTTP_TIMEOUT from ..const import IS_WINDOWS_PLATFORM from ..progress_stream import StreamOutputError from ..project import NoSuchService +from ..project import OneOffFilter +from ..service import BuildAction from ..service import BuildError from ..service import ConvergenceStrategy from ..service import ImageType from ..service import NeedsBuildError -from .command import friendly_error_message from .command import get_config_path_from_options from .command import project_from_options -from .docopt_command import DocoptCommand +from .docopt_command import DocoptDispatcher +from .docopt_command import get_handler from .docopt_command import NoSuchCommand from .errors import UserError from .formatter import ConsoleWarningFormatter from .formatter import Formatter +from .log_printer import build_log_presenters from .log_printer import LogPrinter from .utils import get_version_info from .utils import yesno if not IS_WINDOWS_PLATFORM: - from dockerpty.pty import PseudoTerminal, RunOperation + from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation log = logging.getLogger(__name__) console_handler = logging.StreamHandler(sys.stderr) def main(): - setup_logging() + command = dispatch() + try: - command = TopLevelCommand() - command.sys_dispatch() - except KeyboardInterrupt: + command() + except (KeyboardInterrupt, signals.ShutdownException): log.error("Aborting.") sys.exit(1) except (UserError, NoSuchService, ConfigurationError) as e: log.error(e.msg) sys.exit(1) - except NoSuchCommand as e: - commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand))) - log.error("No such command: %s\n\n%s", e.command, commands) - sys.exit(1) - except APIError as e: - log_api_error(e) - sys.exit(1) except BuildError as e: log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason)) sys.exit(1) @@ -76,29 +70,42 @@ def main(): except NeedsBuildError as e: log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name) sys.exit(1) - except ReadTimeout as e: - log.error( - "An HTTP request took too long to complete. Retry with --verbose to obtain debug information.\n" - "If you encounter this issue regularly because of slow network conditions, consider setting " - "COMPOSE_HTTP_TIMEOUT to a higher value (current value: %s)." % HTTP_TIMEOUT - ) + except errors.ConnectionError: sys.exit(1) -def log_api_error(e): - if 'client is newer than server' in e.explanation: - # we need JSON formatted errors. In the meantime... - # TODO: fix this by refactoring project dispatch - # http://github.com/docker/compose/pull/2832#commitcomment-15923800 - client_version = e.explanation.split('client API version: ')[1].split(',')[0] - log.error( - "The engine version is lesser than the minimum required by " - "compose. 
Your current project requires a Docker Engine of "
-            "version {version} or superior.".format(
-                version=API_VERSION_TO_ENGINE_VERSION[client_version]
-            ))
-    else:
-        log.error(e.explanation)
+def dispatch():
+    setup_logging()
+    dispatcher = DocoptDispatcher(
+        TopLevelCommand,
+        {'options_first': True, 'version': get_version_info('compose')})
+
+    try:
+        options, handler, command_options = dispatcher.parse(sys.argv[1:])
+    except NoSuchCommand as e:
+        commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
+        log.error("No such command: %s\n\n%s", e.command, commands)
+        sys.exit(1)
+
+    setup_console_handler(console_handler, options.get('--verbose'))
+    return functools.partial(perform_command, options, handler, command_options)
+
+
+def perform_command(options, handler, command_options):
+    if options['COMMAND'] in ('help', 'version'):
+        # Skip looking up the compose file.
+        handler(command_options)
+        return
+
+    if options['COMMAND'] == 'config':
+        command = TopLevelCommand(None)
+        handler(command, options, command_options)
+        return
+
+    project = project_from_options('.', options)
+    command = TopLevelCommand(project)
+    with errors.handle_connection_errors(project.client):
+        handler(command, command_options)
 
 
 def setup_logging():
@@ -131,7 +138,7 @@ def parse_doc_section(name, source):
     return [s.strip() for s in pattern.findall(source)]
 
 
-class TopLevelCommand(DocoptCommand):
+class TopLevelCommand(object):
     """Define and run multi-container applications with Docker.
 
     Usage:
@@ -139,10 +146,20 @@
      docker-compose -h|--help
 
    Options:
-      -f, --file FILE           Specify an alternate compose file (default: docker-compose.yml)
-      -p, --project-name NAME   Specify an alternate project name (default: directory name)
-      --verbose                 Show more output
-      -v, --version             Print version and exit
+      -f, --file FILE             Specify an alternate compose file (default: docker-compose.yml)
+      -p, --project-name NAME     Specify an alternate project name (default: directory name)
+      --verbose                   Show more output
+      -v, --version               Print version and exit
+      -H, --host HOST             Daemon socket to connect to
+
+      --tls                       Use TLS; implied by --tlsverify
+      --tlscacert CA_PATH         Trust certs signed only by this CA
+      --tlscert CLIENT_CERT_PATH  Path to TLS certificate file
+      --tlskey TLS_KEY_PATH       Path to TLS key file
+      --tlsverify                 Use TLS and verify the remote
+      --skip-hostname-check       Don't check the daemon's hostname against the name specified
+                                  in the client certificate (for example if your docker host
+                                  is an IP address)
 
    Commands:
      build              Build or rebuild services
@@ -150,6 +167,7 @@
      create             Create services
      down               Stop and remove containers, networks, images, and volumes
      events             Receive real time events from containers
+      exec               Execute a command in a running container
      help               Get help on a command
      kill               Kill containers
      logs               View output from containers
@@ -167,30 +185,12 @@
      up                 Create and start containers
      version            Show the Docker-Compose version information
    """
-    base_dir = '.'
 
-    def docopt_options(self):
-        options = super(TopLevelCommand, self).docopt_options()
-        options['version'] = get_version_info('compose')
-        return options
+    def __init__(self, project, project_dir='.'):
+        self.project = project
+        self.project_dir = project_dir
 
-    def perform_command(self, options, handler, command_options):
-        setup_console_handler(console_handler, options.get('--verbose'))
-
-        if options['COMMAND'] in ('help', 'version'):
-            # Skip looking up the compose file.
-            handler(None, command_options)
-            return
-
-        if options['COMMAND'] == 'config':
-            handler(options, command_options)
-            return
-
-        project = project_from_options(self.base_dir, options)
-        with friendly_error_message():
-            handler(project, command_options)
-
-    def build(self, project, options):
+    def build(self, options):
         """
         Build or rebuild services.
 
@@ -205,7 +205,7 @@
             --no-cache  Do not use cache when building the image.
             --pull      Always attempt to pull a newer version of the image.
         """
-        project.build(
+        self.project.build(
             service_names=options['SERVICE'],
             no_cache=bool(options.get('--no-cache', False)),
             pull=bool(options.get('--pull', False)),
@@ -223,8 +223,13 @@
 
             --services   Print the service names, one per line.
         """
-        config_path = get_config_path_from_options(config_options)
-        compose_config = config.load(config.find(self.base_dir, config_path))
+        environment = Environment.from_env_file(self.project_dir)
+        config_path = get_config_path_from_options(
+            self.project_dir, config_options, environment
+        )
+        compose_config = config.load(
+            config.find(self.project_dir, config_path, environment)
+        )
 
        if options['--quiet']:
            return
@@ -235,7 +240,7 @@
 
        print(serialize_config(compose_config))
 
-    def create(self, project, options):
+    def create(self, options):
        """
        Creates containers for a service.
@@ -246,17 +251,18 @@
                                   image haven't changed.
                                   Incompatible with --no-recreate.
            --no-recreate          If containers already exist, don't recreate them.
                                   Incompatible with --force-recreate.
-            --no-build             Don't build an image, even if it's missing
+            --no-build             Don't build an image, even if it's missing.
+            --build                Build images before creating containers.
        """
        service_names = options['SERVICE']
 
-        project.create(
+        self.project.create(
            service_names=service_names,
            strategy=convergence_strategy_from_opts(options),
-            do_build=not options['--no-build']
+            do_build=build_action_from_opts(options),
        )
 
-    def down(self, project, options):
+    def down(self, options):
        """
        Stop containers and remove containers, networks, volumes, and images
        created by `up`. Only containers and networks are removed by default.
@@ -264,15 +270,17 @@
        Usage: down [options]
 
        Options:
-            --rmi type      Remove images, type may be one of: 'all' to remove
-                            all images, or 'local' to remove only images that
-                            don't have an custom name set by the `image` field
-            -v, --volumes   Remove data volumes
+            --rmi type          Remove images, type may be one of: 'all' to remove
+                                all images, or 'local' to remove only images that
+                                don't have a custom name set by the `image` field
+            -v, --volumes       Remove data volumes
+            --remove-orphans    Remove containers for services not defined in
+                                the Compose file
        """
        image_type = image_type_from_opt('--rmi', options['--rmi'])
-        project.down(image_type, options['--volumes'])
+        self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
 
-    def events(self, project, options):
+    def events(self, options):
        """
        Receive real time events from containers.
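The hunk below changes what `docker-compose events --json` emits: the timestamp is ISO-formatted and the raw container object, which is not JSON-serializable, is dropped before dumping. A small hypothetical consumer could read that stream line by line; the exact keys shown are assumptions based on the formatter, not a documented schema:

import json
import subprocess

# Assumes a running project; `--json` prints one JSON object per line.
proc = subprocess.Popen(
    ['docker-compose', 'events', '--json'],
    stdout=subprocess.PIPE)

for raw_line in proc.stdout:
    event = json.loads(raw_line.decode('utf-8'))
    # Assumed shape, e.g.:
    # {"action": "start", "service": "web", "time": "2016-04-13T12:00:00.000000"}
    print(event.get('time'), event.get('service'), event.get('action'))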
@@ -289,23 +297,76 @@ class TopLevelCommand(DocoptCommand): def json_format_event(event): event['time'] = event['time'].isoformat() + event.pop('container') return json.dumps(event) - for event in project.events(): + for event in self.project.events(): formatter = json_format_event if options['--json'] else format_event print(formatter(event)) sys.stdout.flush() - def help(self, project, options): + def exec_command(self, options): + """ + Execute a command in a running container + + Usage: exec [options] SERVICE COMMAND [ARGS...] + + Options: + -d Detached mode: Run command in the background. + --privileged Give extended privileges to the process. + --user USER Run the command as this user. + -T Disable pseudo-tty allocation. By default `docker-compose exec` + allocates a TTY. + --index=index index of the container if there are multiple + instances of a service [default: 1] + """ + index = int(options.get('--index')) + service = self.project.get_service(options['SERVICE']) + try: + container = service.get_container(number=index) + except ValueError as e: + raise UserError(str(e)) + command = [options['COMMAND']] + options['ARGS'] + tty = not options["-T"] + + create_exec_options = { + "privileged": options["--privileged"], + "user": options["--user"], + "tty": tty, + "stdin": tty, + } + + exec_id = container.create_exec(command, **create_exec_options) + + if options['-d']: + container.start_exec(exec_id, tty=tty) + return + + signals.set_signal_handler_to_shutdown() + try: + operation = ExecOperation( + self.project.client, + exec_id, + interactive=tty, + ) + pty = PseudoTerminal(self.project.client, operation) + pty.start() + except signals.ShutdownException: + log.info("received shutdown exception: closing") + exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode") + sys.exit(exit_code) + + @classmethod + def help(cls, options): """ Get help on a command. Usage: help COMMAND """ - handler = self.get_handler(options['COMMAND']) + handler = get_handler(cls, options['COMMAND']) raise SystemExit(getdoc(handler)) - def kill(self, project, options): + def kill(self, options): """ Force stop service containers. @@ -317,33 +378,51 @@ class TopLevelCommand(DocoptCommand): """ signal = options.get('-s', 'SIGKILL') - project.kill(service_names=options['SERVICE'], signal=signal) + self.project.kill(service_names=options['SERVICE'], signal=signal) - def logs(self, project, options): + def logs(self, options): """ View output from containers. Usage: logs [options] [SERVICE...] Options: - --no-color Produce monochrome output. + --no-color Produce monochrome output. + -f, --follow Follow log output. + -t, --timestamps Show timestamps. + --tail="all" Number of lines to show from the end of the logs + for each container. """ - containers = project.containers(service_names=options['SERVICE'], stopped=True) + containers = self.project.containers(service_names=options['SERVICE'], stopped=True) - monochrome = options['--no-color'] + tail = options['--tail'] + if tail is not None: + if tail.isdigit(): + tail = int(tail) + elif tail != 'all': + raise UserError("tail flag must be all or a number") + log_args = { + 'follow': options['--follow'], + 'tail': tail, + 'timestamps': options['--timestamps'] + } print("Attaching to", list_containers(containers)) - LogPrinter(containers, monochrome=monochrome).run() + log_printer_from_project( + self.project, + containers, + options['--no-color'], + log_args).run() - def pause(self, project, options): + def pause(self, options): """ Pause services. 
Usage: pause [SERVICE...] """ - containers = project.pause(service_names=options['SERVICE']) + containers = self.project.pause(service_names=options['SERVICE']) exit_if(not containers, 'No containers to pause', 1) - def port(self, project, options): + def port(self, options): """ Print the public port for a port binding. @@ -355,7 +434,7 @@ class TopLevelCommand(DocoptCommand): instances of a service [default: 1] """ index = int(options.get('--index')) - service = project.get_service(options['SERVICE']) + service = self.project.get_service(options['SERVICE']) try: container = service.get_container(number=index) except ValueError as e: @@ -364,7 +443,7 @@ class TopLevelCommand(DocoptCommand): options['PRIVATE_PORT'], protocol=options.get('--protocol') or 'tcp') or '') - def ps(self, project, options): + def ps(self, options): """ List containers. @@ -374,8 +453,8 @@ class TopLevelCommand(DocoptCommand): -q Only display IDs """ containers = sorted( - project.containers(service_names=options['SERVICE'], stopped=True) + - project.containers(service_names=options['SERVICE'], one_off=True), + self.project.containers(service_names=options['SERVICE'], stopped=True) + + self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only), key=attrgetter('name')) if options['-q']: @@ -401,7 +480,7 @@ class TopLevelCommand(DocoptCommand): ]) print(Formatter().table(headers, rows)) - def pull(self, project, options): + def pull(self, options): """ Pulls images for services. @@ -410,12 +489,12 @@ class TopLevelCommand(DocoptCommand): Options: --ignore-pull-failures Pull what it can and ignores images with pull failures. """ - project.pull( + self.project.pull( service_names=options['SERVICE'], ignore_pull_failures=options.get('--ignore-pull-failures') ) - def rm(self, project, options): + def rm(self, options): """ Remove stopped service containers. @@ -429,22 +508,36 @@ class TopLevelCommand(DocoptCommand): Options: -f, --force Don't ask to confirm removal -v Remove volumes associated with containers + -a, --all Also remove one-off containers created by + docker-compose run """ - all_containers = project.containers(service_names=options['SERVICE'], stopped=True) + if options.get('--all'): + one_off = OneOffFilter.include + else: + log.warn( + 'Not including one-off containers created by `docker-compose run`.\n' + 'To include them, use `docker-compose rm --all`.\n' + 'This will be the default behavior in the next version of Compose.\n') + one_off = OneOffFilter.exclude + + all_containers = self.project.containers( + service_names=options['SERVICE'], stopped=True, one_off=one_off + ) stopped_containers = [c for c in all_containers if not c.is_running] if len(stopped_containers) > 0: print("Going to remove", list_containers(stopped_containers)) if options.get('--force') \ or yesno("Are you sure? [yN] ", default=False): - project.remove_stopped( + self.project.remove_stopped( service_names=options['SERVICE'], - v=options.get('-v', False) + v=options.get('-v', False), + one_off=one_off ) else: print("No stopped containers") - def run(self, project, options): + def run(self, options): """ Run a one-off command on a service. @@ -472,8 +565,9 @@ class TopLevelCommand(DocoptCommand): to the host. -T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY. 
+ -w, --workdir="" Working directory inside the container """ - service = project.get_service(options['SERVICE']) + service = self.project.get_service(options['SERVICE']) detach = options['-d'] if IS_WINDOWS_PLATFORM and not detach: @@ -482,48 +576,21 @@ class TopLevelCommand(DocoptCommand): "Please pass the -d flag when using `docker-compose run`." ) - if options['COMMAND']: - command = [options['COMMAND']] + options['ARGS'] - else: - command = service.options.get('command') - - container_options = { - 'command': command, - 'tty': not (detach or options['-T'] or not sys.stdin.isatty()), - 'stdin_open': not detach, - 'detach': detach, - } - - if options['-e']: - container_options['environment'] = parse_environment(options['-e']) - - if options['--entrypoint']: - container_options['entrypoint'] = options.get('--entrypoint') - - if options['--rm']: - container_options['restart'] = None - - if options['--user']: - container_options['user'] = options.get('--user') - - if not options['--service-ports']: - container_options['ports'] = [] - - if options['--publish']: - container_options['ports'] = options.get('--publish') - if options['--publish'] and options['--service-ports']: raise UserError( 'Service port mapping and manual port mapping ' 'can not be used togather' ) - if options['--name']: - container_options['name'] = options['--name'] + if options['COMMAND']: + command = [options['COMMAND']] + options['ARGS'] + else: + command = service.options.get('command') - run_one_off_container(container_options, project, service, options) + container_options = build_container_options(options, detach, command) + run_one_off_container(container_options, self.project, service, options) - def scale(self, project, options): + def scale(self, options): """ Set number of containers to run for a service. @@ -549,18 +616,18 @@ class TopLevelCommand(DocoptCommand): except ValueError: raise UserError('Number of containers for service "%s" is not a ' 'number' % service_name) - project.get_service(service_name).scale(num, timeout=timeout) + self.project.get_service(service_name).scale(num, timeout=timeout) - def start(self, project, options): + def start(self, options): """ Start existing containers. Usage: start [SERVICE...] """ - containers = project.start(service_names=options['SERVICE']) + containers = self.project.start(service_names=options['SERVICE']) exit_if(not containers, 'No containers to start', 1) - def stop(self, project, options): + def stop(self, options): """ Stop running containers without removing them. @@ -573,9 +640,9 @@ class TopLevelCommand(DocoptCommand): (default: 10) """ timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT) - project.stop(service_names=options['SERVICE'], timeout=timeout) + self.project.stop(service_names=options['SERVICE'], timeout=timeout) - def restart(self, project, options): + def restart(self, options): """ Restart running containers. @@ -586,19 +653,19 @@ class TopLevelCommand(DocoptCommand): (default: 10) """ timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT) - containers = project.restart(service_names=options['SERVICE'], timeout=timeout) + containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout) exit_if(not containers, 'No containers to restart', 1) - def unpause(self, project, options): + def unpause(self, options): """ Unpause services. Usage: unpause [SERVICE...] 
""" - containers = project.unpause(service_names=options['SERVICE']) + containers = self.project.unpause(service_names=options['SERVICE']) exit_if(not containers, 'No containers to unpause', 1) - def up(self, project, options): + def up(self, options): """ Builds, (re)creates, starts, and attaches to containers for a service. @@ -630,43 +697,55 @@ class TopLevelCommand(DocoptCommand): Incompatible with --no-recreate. --no-recreate If containers already exist, don't recreate them. Incompatible with --force-recreate. - --no-build Don't build an image, even if it's missing + --no-build Don't build an image, even if it's missing. + --build Build images before starting containers. --abort-on-container-exit Stops all containers if any container was stopped. Incompatible with -d. -t, --timeout TIMEOUT Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10) + --remove-orphans Remove containers for services not + defined in the Compose file """ - monochrome = options['--no-color'] start_deps = not options['--no-deps'] cascade_stop = options['--abort-on-container-exit'] service_names = options['SERVICE'] timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT) + remove_orphans = options['--remove-orphans'] detached = options.get('-d') if detached and cascade_stop: raise UserError("--abort-on-container-exit and -d cannot be combined.") - with up_shutdown_context(project, service_names, timeout, detached): - to_attach = project.up( + with up_shutdown_context(self.project, service_names, timeout, detached): + to_attach = self.project.up( service_names=service_names, start_deps=start_deps, strategy=convergence_strategy_from_opts(options), - do_build=not options['--no-build'], + do_build=build_action_from_opts(options), timeout=timeout, - detached=detached) + detached=detached, + remove_orphans=remove_orphans) if detached: return - log_printer = build_log_printer(to_attach, service_names, monochrome, cascade_stop) + + log_printer = log_printer_from_project( + self.project, + filter_containers_to_service_names(to_attach, service_names), + options['--no-color'], + {'follow': True}, + cascade_stop, + event_stream=self.project.events(service_names=service_names)) print("Attaching to", list_containers(log_printer.containers)) log_printer.run() if cascade_stop: print("Aborting on container exit...") - project.stop(service_names=service_names, timeout=timeout) + self.project.stop(service_names=service_names, timeout=timeout) - def version(self, project, options): + @classmethod + def version(cls, options): """ Show version informations @@ -705,6 +784,54 @@ def image_type_from_opt(flag, value): raise UserError("%s flag must be one of: all, local" % flag) +def build_action_from_opts(options): + if options['--build'] and options['--no-build']: + raise UserError("--build and --no-build can not be combined.") + + if options['--build']: + return BuildAction.force + + if options['--no-build']: + return BuildAction.skip + + return BuildAction.none + + +def build_container_options(options, detach, command): + container_options = { + 'command': command, + 'tty': not (detach or options['-T'] or not sys.stdin.isatty()), + 'stdin_open': not detach, + 'detach': detach, + } + + if options['-e']: + container_options['environment'] = parse_environment(options['-e']) + + if options['--entrypoint']: + container_options['entrypoint'] = options.get('--entrypoint') + + if options['--rm']: + container_options['restart'] = None + + if options['--user']: + 
container_options['user'] = options.get('--user') + + if not options['--service-ports']: + container_options['ports'] = [] + + if options['--publish']: + container_options['ports'] = options.get('--publish') + + if options['--name']: + container_options['name'] = options['--name'] + + if options['--workdir']: + container_options['working_dir'] = options['--workdir'] + + return container_options + + def run_one_off_container(container_options, project, service, options): if not options['--no-deps']: deps = service.get_dependency_names() @@ -756,13 +883,30 @@ def run_one_off_container(container_options, project, service, options): sys.exit(exit_code) -def build_log_printer(containers, service_names, monochrome, cascade_stop): - if service_names: - containers = [ - container - for container in containers if container.service in service_names - ] - return LogPrinter(containers, monochrome=monochrome, cascade_stop=cascade_stop) +def log_printer_from_project( + project, + containers, + monochrome, + log_args, + cascade_stop=False, + event_stream=None, +): + return LogPrinter( + containers, + build_log_presenters(project.service_names, monochrome), + event_stream or project.events(), + cascade_stop=cascade_stop, + log_args=log_args) + + +def filter_containers_to_service_names(containers, service_names): + if not service_names: + return containers + + return [ + container + for container in containers if container.service in service_names + ] @contextlib.contextmanager diff --git a/compose/cli/multiplexer.py b/compose/cli/multiplexer.py deleted file mode 100644 index e6e63f24b..000000000 --- a/compose/cli/multiplexer.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import absolute_import -from __future__ import unicode_literals - -from threading import Thread - -from six.moves import _thread as thread - -try: - from Queue import Queue, Empty -except ImportError: - from queue import Queue, Empty # Python 3.x - - -STOP = object() - - -class Multiplexer(object): - """ - Create a single iterator from several iterators by running all of them in - parallel and yielding results as they come in. - """ - - def __init__(self, iterators, cascade_stop=False): - self.iterators = iterators - self.cascade_stop = cascade_stop - self._num_running = len(iterators) - self.queue = Queue() - - def loop(self): - self._init_readers() - - while self._num_running > 0: - try: - item, exception = self.queue.get(timeout=0.1) - - if exception: - raise exception - - if item is STOP: - if self.cascade_stop is True: - break - else: - self._num_running -= 1 - else: - yield item - except Empty: - pass - # See https://github.com/docker/compose/issues/189 - except thread.error: - raise KeyboardInterrupt() - - def _init_readers(self): - for iterator in self.iterators: - t = Thread(target=_enqueue_output, args=(iterator, self.queue)) - t.daemon = True - t.start() - - -def _enqueue_output(iterator, queue): - try: - for item in iterator: - queue.put((item, None)) - queue.put((STOP, None)) - except Exception as e: - queue.put((None, e)) diff --git a/compose/config/__init__.py b/compose/config/__init__.py index dd01f221e..7cf71eb98 100644 --- a/compose/config/__init__.py +++ b/compose/config/__init__.py @@ -2,6 +2,7 @@ from __future__ import absolute_import from __future__ import unicode_literals +from . 
import environment from .config import ConfigurationError from .config import DOCKER_CONFIG_KEYS from .config import find diff --git a/compose/config/config.py b/compose/config/config.py index 48b34318b..dc3f56ea9 100644 --- a/compose/config/config.py +++ b/compose/config/config.py @@ -1,7 +1,6 @@ from __future__ import absolute_import from __future__ import unicode_literals -import codecs import functools import logging import operator @@ -17,6 +16,9 @@ from cached_property import cached_property from ..const import COMPOSEFILE_V1 as V1 from ..const import COMPOSEFILE_V2_0 as V2_0 from ..utils import build_string_dict +from .environment import env_vars_from_file +from .environment import Environment +from .environment import split_env from .errors import CircularReference from .errors import ComposeFileNotFound from .errors import ConfigurationError @@ -31,12 +33,12 @@ from .types import ServiceLink from .types import VolumeFromSpec from .types import VolumeSpec from .validation import match_named_volumes -from .validation import validate_against_fields_schema -from .validation import validate_against_service_schema +from .validation import validate_against_config_schema from .validation import validate_config_section from .validation import validate_depends_on from .validation import validate_extends_file_path from .validation import validate_network_mode +from .validation import validate_service_constraints from .validation import validate_top_level_object from .validation import validate_ulimits @@ -73,6 +75,7 @@ DOCKER_CONFIG_KEYS = [ 'read_only', 'restart', 'security_opt', + 'shm_size', 'stdin_open', 'stop_signal', 'tty', @@ -87,6 +90,8 @@ ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [ 'build', 'container_name', 'dockerfile', + 'log_driver', + 'log_opt', 'logging', 'network_mode', ] @@ -110,13 +115,21 @@ DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml' log = logging.getLogger(__name__) -class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files')): +class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')): """ :param working_dir: the directory to use for relative paths in the config :type working_dir: string :param config_files: list of configuration files to load :type config_files: list of :class:`ConfigFile` + :param environment: computed environment values for this project + :type environment: :class:`environment.Environment` """ + def __new__(cls, working_dir, config_files, environment=None): + if environment is None: + environment = Environment.from_env_file(working_dir) + return super(ConfigDetails, cls).__new__( + cls, working_dir, config_files, environment + ) class ConfigFile(namedtuple('_ConfigFile', 'filename config')): @@ -204,11 +217,13 @@ class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name conf config) -def find(base_dir, filenames): +def find(base_dir, filenames, environment): if filenames == ['-']: return ConfigDetails( os.getcwd(), - [ConfigFile(None, yaml.safe_load(sys.stdin))]) + [ConfigFile(None, yaml.safe_load(sys.stdin))], + environment + ) if filenames: filenames = [os.path.join(base_dir, f) for f in filenames] @@ -218,7 +233,9 @@ def find(base_dir, filenames): log.debug("Using configuration files: {}".format(",".join(filenames))) return ConfigDetails( os.path.dirname(filenames[0]), - [ConfigFile.from_filename(f) for f in filenames]) + [ConfigFile.from_filename(f) for f in filenames], + environment + ) def validate_config_version(config_files): @@ -286,7 +303,7 @@ def 
load(config_details): validate_config_version(config_details.config_files) processed_files = [ - process_config_file(config_file) + process_config_file(config_file, config_details.environment) for config_file in config_details.config_files ] config_details = config_details._replace(config_files=processed_files) @@ -298,10 +315,7 @@ def load(config_details): networks = load_mapping( config_details.config_files, 'get_networks', 'Network' ) - service_dicts = load_services( - config_details.working_dir, - main_file, - [file.get_service_dicts() for file in config_details.config_files]) + service_dicts = load_services(config_details, main_file) if main_file.version != V1: for service_dict in service_dicts: @@ -345,14 +359,16 @@ def load_mapping(config_files, get_func, entity_type): return mapping -def load_services(working_dir, config_file, service_configs): +def load_services(config_details, config_file): def build_service(service_name, service_dict, service_names): service_config = ServiceConfig.with_abs_paths( - working_dir, + config_details.working_dir, config_file.filename, service_name, service_dict) - resolver = ServiceExtendsResolver(service_config, config_file) + resolver = ServiceExtendsResolver( + service_config, config_file, environment=config_details.environment + ) service_dict = process_service(resolver.run()) service_config = service_config._replace(config=service_dict) @@ -360,7 +376,8 @@ def load_services(working_dir, config_file, service_configs): service_dict = finalize_service( service_config, service_names, - config_file.version) + config_file.version, + config_details.environment) return service_dict def build_services(service_config): @@ -380,6 +397,10 @@ def load_services(working_dir, config_file, service_configs): for name in all_service_names } + service_configs = [ + file.get_service_dicts() for file in config_details.config_files + ] + service_config = service_configs[0] for next_config in service_configs[1:]: service_config = merge_services(service_config, next_config) @@ -387,16 +408,17 @@ def load_services(working_dir, config_file, service_configs): return build_services(service_config) -def interpolate_config_section(filename, config, section): +def interpolate_config_section(filename, config, section, environment): validate_config_section(filename, config, section) - return interpolate_environment_variables(config, section) + return interpolate_environment_variables(config, section, environment) -def process_config_file(config_file, service_name=None): +def process_config_file(config_file, environment, service_name=None): services = interpolate_config_section( config_file.filename, config_file.get_service_dicts(), - 'service') + 'service', + environment,) if config_file.version == V2_0: processed_config = dict(config_file.config) @@ -404,17 +426,19 @@ def process_config_file(config_file, service_name=None): processed_config['volumes'] = interpolate_config_section( config_file.filename, config_file.get_volumes(), - 'volume') + 'volume', + environment,) processed_config['networks'] = interpolate_config_section( config_file.filename, config_file.get_networks(), - 'network') + 'network', + environment,) if config_file.version == V1: processed_config = services config_file = config_file._replace(config=processed_config) - validate_against_fields_schema(config_file) + validate_against_config_schema(config_file) if service_name and service_name not in services: raise ConfigurationError( @@ -425,11 +449,12 @@ def process_config_file(config_file, service_name=None): 
class ServiceExtendsResolver(object): - def __init__(self, service_config, config_file, already_seen=None): + def __init__(self, service_config, config_file, environment, already_seen=None): self.service_config = service_config self.working_dir = service_config.working_dir self.already_seen = already_seen or [] self.config_file = config_file + self.environment = environment @property def signature(self): @@ -459,8 +484,8 @@ class ServiceExtendsResolver(object): extends_file = ConfigFile.from_filename(config_path) validate_config_version([self.config_file, extends_file]) extended_file = process_config_file( - extends_file, - service_name=service_name) + extends_file, self.environment, service_name=service_name + ) service_config = extended_file.get_service(service_name) return config_path, service_config, service_name @@ -473,7 +498,9 @@ class ServiceExtendsResolver(object): service_name, service_dict), self.config_file, - already_seen=self.already_seen + [self.signature]) + already_seen=self.already_seen + [self.signature], + environment=self.environment + ) service_config = resolver.run() other_service_dict = process_service(service_config) @@ -502,7 +529,7 @@ class ServiceExtendsResolver(object): return filename -def resolve_environment(service_dict): +def resolve_environment(service_dict, environment=None): """Unpack any environment variables from an env_file, if set. Interpolate environment values if set. """ @@ -511,12 +538,12 @@ def resolve_environment(service_dict): env.update(env_vars_from_file(env_file)) env.update(parse_environment(service_dict.get('environment'))) - return dict(resolve_env_var(k, v) for k, v in six.iteritems(env)) + return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env)) -def resolve_build_args(build): +def resolve_build_args(build, environment): args = parse_build_arguments(build.get('args')) - return dict(resolve_env_var(k, v) for k, v in six.iteritems(args)) + return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(args)) def validate_extended_service_dict(service_dict, filename, service): @@ -547,7 +574,7 @@ def validate_extended_service_dict(service_dict, filename, service): def validate_service(service_config, service_names, version): service_dict, service_name = service_config.config, service_config.name - validate_against_service_schema(service_dict, service_name, version) + validate_service_constraints(service_dict, service_name, version) validate_paths(service_dict) validate_ulimits(service_config) @@ -588,18 +615,18 @@ def process_service(service_config): if 'extra_hosts' in service_dict: service_dict['extra_hosts'] = parse_extra_hosts(service_dict['extra_hosts']) - for field in ['dns', 'dns_search']: + for field in ['dns', 'dns_search', 'tmpfs']: if field in service_dict: service_dict[field] = to_list(service_dict[field]) return service_dict -def finalize_service(service_config, service_names, version): +def finalize_service(service_config, service_names, version, environment): service_dict = dict(service_config.config) if 'environment' in service_dict or 'env_file' in service_dict: - service_dict['environment'] = resolve_environment(service_dict) + service_dict['environment'] = resolve_environment(service_dict, environment) service_dict.pop('env_file', None) if 'volumes_from' in service_dict: @@ -626,7 +653,7 @@ def finalize_service(service_config, service_names, version): if 'restart' in service_dict: service_dict['restart'] = parse_restart_spec(service_dict['restart']) - normalize_build(service_dict, 
service_config.working_dir) + normalize_build(service_dict, service_config.working_dir, environment) service_dict['name'] = service_config.name return normalize_v1_service_format(service_dict) @@ -727,7 +754,7 @@ def merge_service_dicts(base, override, version): ]: md.merge_field(field, operator.add, default=[]) - for field in ['dns', 'dns_search', 'env_file']: + for field in ['dns', 'dns_search', 'env_file', 'tmpfs']: md.merge_field(field, merge_list_or_string) for field in set(ALLOWED_KEYS) - set(md): @@ -774,15 +801,6 @@ def merge_environment(base, override): return env -def split_env(env): - if isinstance(env, six.binary_type): - env = env.decode('utf-8', 'replace') - if '=' in env: - return env.split('=', 1) - else: - return env, None - - def split_label(label): if '=' in label: return label.split('=', 1) @@ -820,30 +838,15 @@ def parse_ulimits(ulimits): return dict(ulimits) -def resolve_env_var(key, val): +def resolve_env_var(key, val, environment): if val is not None: return key, val - elif key in os.environ: - return key, os.environ[key] + elif environment and key in environment: + return key, environment[key] else: return key, None -def env_vars_from_file(filename): - """ - Read in a line delimited file of environment variables. - """ - if not os.path.exists(filename): - raise ConfigurationError("Couldn't find env file: %s" % filename) - env = {} - for line in codecs.open(filename, 'r', 'utf-8'): - line = line.strip() - if line and not line.startswith('#'): - k, v = split_env(line) - env[k] = v - return env - - def resolve_volume_paths(working_dir, service_dict): return [ resolve_volume_path(working_dir, volume) @@ -863,7 +866,7 @@ def resolve_volume_path(working_dir, volume): return container_path -def normalize_build(service_dict, working_dir): +def normalize_build(service_dict, working_dir, environment): if 'build' in service_dict: build = {} @@ -873,7 +876,9 @@ def normalize_build(service_dict, working_dir): else: build.update(service_dict['build']) if 'args' in build: - build['args'] = build_string_dict(resolve_build_args(build)) + build['args'] = build_string_dict( + resolve_build_args(build, environment) + ) service_dict['build'] = build diff --git a/compose/config/service_schema_v1.json b/compose/config/config_schema_v1.json similarity index 87% rename from compose/config/service_schema_v1.json rename to compose/config/config_schema_v1.json index d220ec548..36a937938 100644 --- a/compose/config/service_schema_v1.json +++ b/compose/config/config_schema_v1.json @@ -1,13 +1,16 @@ { "$schema": "http://json-schema.org/draft-04/schema#", - "id": "service_schema_v1.json", + "id": "config_schema_v1.json", "type": "object", - "allOf": [ - {"$ref": "#/definitions/service"}, - {"$ref": "#/definitions/constraints"} - ], + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + + "additionalProperties": false, "definitions": { "service": { @@ -98,6 +101,7 @@ "read_only": {"type": "boolean"}, "restart": {"type": "string"}, "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, "stdin_open": {"type": "boolean"}, "stop_signal": {"type": "string"}, "tty": {"type": "boolean"}, @@ -152,8 +156,7 @@ "type": "object", "patternProperties": { ".+": { - "type": ["string", "number", "boolean", "null"], - "format": "bool-value-in-mapping" + "type": ["string", "number", "null"] } }, "additionalProperties": false @@ -161,21 +164,24 @@ {"type": "array", "items": {"type": "string"}, 
"uniqueItems": true} ] }, + "constraints": { - "id": "#/definitions/constraints", - "anyOf": [ - { - "required": ["build"], - "not": {"required": ["image"]} - }, - { - "required": ["image"], - "not": {"anyOf": [ - {"required": ["build"]}, - {"required": ["dockerfile"]} - ]} - } - ] + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + { + "required": ["build"], + "not": {"required": ["image"]} + }, + { + "required": ["image"], + "not": {"anyOf": [ + {"required": ["build"]}, + {"required": ["dockerfile"]} + ]} + } + ] + } } } } diff --git a/compose/config/service_schema_v2.0.json b/compose/config/config_schema_v2.0.json similarity index 69% rename from compose/config/service_schema_v2.0.json rename to compose/config/config_schema_v2.0.json index 4c5c40fbc..e84d13179 100644 --- a/compose/config/service_schema_v2.0.json +++ b/compose/config/config_schema_v2.0.json @@ -1,15 +1,50 @@ { "$schema": "http://json-schema.org/draft-04/schema#", - "id": "service_schema_v2.0.json", - + "id": "config_schema_v2.0.json", "type": "object", - "allOf": [ - {"$ref": "#/definitions/service"}, - {"$ref": "#/definitions/constraints"} - ], + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, "definitions": { + "service": { "id": "#/definitions/service", "type": "object", @@ -23,20 +58,7 @@ "properties": { "context": {"type": "string"}, "dockerfile": {"type": "string"}, - "args": { - "oneOf": [ - {"$ref": "#/definitions/list_of_strings"}, - { - "type": "object", - "patternProperties": { - "^.+$": { - "type": ["string", "number"] - } - }, - "additionalProperties": false - } - ] - } + "args": {"$ref": "#/definitions/list_or_dict"} }, "additionalProperties": false } @@ -130,7 +152,9 @@ { "type": "object", "properties": { - "aliases": {"$ref": "#/definitions/list_of_strings"} + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} }, "additionalProperties": false }, @@ -157,8 +181,10 @@ "read_only": {"type": "boolean"}, "restart": {"type": "string"}, "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, "stdin_open": {"type": "boolean"}, "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, "tty": {"type": "boolean"}, "ulimits": { "type": "object", @@ -192,6 +218,60 @@ "additionalProperties": false }, + "network": { + "id": "#/definitions/network", + "type": "object", + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array" + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false 
+ } + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "additionalProperties": false + }, + "additionalProperties": false + }, + "string_or_list": { "oneOf": [ {"type": "string"}, @@ -211,8 +291,7 @@ "type": "object", "patternProperties": { ".+": { - "type": ["string", "number", "boolean", "null"], - "format": "bool-value-in-mapping" + "type": ["string", "number", "null"] } }, "additionalProperties": false @@ -220,15 +299,18 @@ {"type": "array", "items": {"type": "string"}, "uniqueItems": true} ] }, + "constraints": { - "id": "#/definitions/constraints", - "anyOf": [ + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ {"required": ["build"]}, {"required": ["image"]} - ], - "properties": { - "build": { - "required": ["context"] + ], + "properties": { + "build": { + "required": ["context"] + } } } } diff --git a/compose/config/environment.py b/compose/config/environment.py new file mode 100644 index 000000000..ad5c0b3da --- /dev/null +++ b/compose/config/environment.py @@ -0,0 +1,93 @@ +from __future__ import absolute_import +from __future__ import unicode_literals + +import codecs +import logging +import os + +import six + +from ..const import IS_WINDOWS_PLATFORM +from .errors import ConfigurationError + +log = logging.getLogger(__name__) + + +def split_env(env): + if isinstance(env, six.binary_type): + env = env.decode('utf-8', 'replace') + if '=' in env: + return env.split('=', 1) + else: + return env, None + + +def env_vars_from_file(filename): + """ + Read in a line delimited file of environment variables. + """ + if not os.path.exists(filename): + raise ConfigurationError("Couldn't find env file: %s" % filename) + env = {} + for line in codecs.open(filename, 'r', 'utf-8'): + line = line.strip() + if line and not line.startswith('#'): + k, v = split_env(line) + env[k] = v + return env + + +class Environment(dict): + def __init__(self, *args, **kwargs): + super(Environment, self).__init__(*args, **kwargs) + self.missing_keys = [] + + @classmethod + def from_env_file(cls, base_dir): + def _initialize(): + result = cls() + if base_dir is None: + return result + env_file_path = os.path.join(base_dir, '.env') + try: + return cls(env_vars_from_file(env_file_path)) + except ConfigurationError: + pass + return result + instance = _initialize() + instance.update(os.environ) + return instance + + def __getitem__(self, key): + try: + return super(Environment, self).__getitem__(key) + except KeyError: + if IS_WINDOWS_PLATFORM: + try: + return super(Environment, self).__getitem__(key.upper()) + except KeyError: + pass + if key not in self.missing_keys: + log.warn( + "The {} variable is not set. Defaulting to a blank string." 
+ .format(key) + ) + self.missing_keys.append(key) + + return "" + + def __contains__(self, key): + result = super(Environment, self).__contains__(key) + if IS_WINDOWS_PLATFORM: + return ( + result or super(Environment, self).__contains__(key.upper()) + ) + return result + + def get(self, key, *args, **kwargs): + if IS_WINDOWS_PLATFORM: + return super(Environment, self).get( + key, + super(Environment, self).get(key.upper(), *args, **kwargs) + ) + return super(Environment, self).get(key, *args, **kwargs) diff --git a/compose/config/errors.py b/compose/config/errors.py index f94ac7acd..d5df7ae55 100644 --- a/compose/config/errors.py +++ b/compose/config/errors.py @@ -38,7 +38,8 @@ class CircularReference(ConfigurationError): class ComposeFileNotFound(ConfigurationError): def __init__(self, supported_filenames): super(ComposeFileNotFound, self).__init__(""" - Can't find a suitable configuration file in this directory or any parent. Are you in the right directory? + Can't find a suitable configuration file in this directory or any + parent. Are you in the right directory? Supported filenames: %s """ % ", ".join(supported_filenames)) diff --git a/compose/config/fields_schema_v1.json b/compose/config/fields_schema_v1.json deleted file mode 100644 index 8f6a8c0ad..000000000 --- a/compose/config/fields_schema_v1.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - - "type": "object", - "id": "fields_schema_v1.json", - - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "service_schema_v1.json#/definitions/service" - } - }, - "additionalProperties": false -} diff --git a/compose/config/fields_schema_v2.0.json b/compose/config/fields_schema_v2.0.json deleted file mode 100644 index 7703adcd0..000000000 --- a/compose/config/fields_schema_v2.0.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "id": "fields_schema_v2.0.json", - - "properties": { - "version": { - "type": "string" - }, - "services": { - "id": "#/properties/services", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "service_schema_v2.0.json#/definitions/service" - } - }, - "additionalProperties": false - }, - "networks": { - "id": "#/properties/networks", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/network" - } - } - }, - "volumes": { - "id": "#/properties/volumes", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/volume" - } - }, - "additionalProperties": false - } - }, - - "definitions": { - "network": { - "id": "#/definitions/network", - "type": "object", - "properties": { - "driver": {"type": "string"}, - "driver_opts": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number"]} - } - }, - "ipam": { - "type": "object", - "properties": { - "driver": {"type": "string"}, - "config": { - "type": "array" - } - }, - "additionalProperties": false - }, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - "volume": { - "id": "#/definitions/volume", - "type": ["object", "null"], - "properties": { - "driver": {"type": "string"}, - "driver_opts": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number"]} - } - }, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - } - }, - 
"additionalProperties": false - }, - "additionalProperties": false - } - }, - "additionalProperties": false -} diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py index 1e56ebb66..63020d91a 100644 --- a/compose/config/interpolation.py +++ b/compose/config/interpolation.py @@ -2,7 +2,6 @@ from __future__ import absolute_import from __future__ import unicode_literals import logging -import os from string import Template import six @@ -11,12 +10,11 @@ from .errors import ConfigurationError log = logging.getLogger(__name__) -def interpolate_environment_variables(config, section): - mapping = BlankDefaultDict(os.environ) +def interpolate_environment_variables(config, section, environment): def process_item(name, config_dict): return dict( - (key, interpolate_value(name, key, val, section, mapping)) + (key, interpolate_value(name, key, val, section, environment)) for key, val in (config_dict or {}).items() ) @@ -60,25 +58,6 @@ def interpolate(string, mapping): raise InvalidInterpolation(string) -class BlankDefaultDict(dict): - def __init__(self, *args, **kwargs): - super(BlankDefaultDict, self).__init__(*args, **kwargs) - self.missing_keys = [] - - def __getitem__(self, key): - try: - return super(BlankDefaultDict, self).__getitem__(key) - except KeyError: - if key not in self.missing_keys: - log.warn( - "The {} variable is not set. Defaulting to a blank string." - .format(key) - ) - self.missing_keys.append(key) - - return "" - - class InvalidInterpolation(Exception): def __init__(self, string): self.string = string diff --git a/compose/config/sort_services.py b/compose/config/sort_services.py index 9d29f329e..20ac4461b 100644 --- a/compose/config/sort_services.py +++ b/compose/config/sort_services.py @@ -23,28 +23,31 @@ def get_source_name_from_network_mode(network_mode, source_type): return net_name +def get_service_names(links): + return [link.split(':')[0] for link in links] + + +def get_service_names_from_volumes_from(volumes_from): + return [volume_from.source for volume_from in volumes_from] + + +def get_service_dependents(service_dict, services): + name = service_dict['name'] + return [ + service for service in services + if (name in get_service_names(service.get('links', [])) or + name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or + name == get_service_name_from_network_mode(service.get('network_mode')) or + name in service.get('depends_on', [])) + ] + + def sort_service_dicts(services): # Topological sort (Cormen/Tarjan algorithm). 
unmarked = services[:] temporary_marked = set() sorted_services = [] - def get_service_names(links): - return [link.split(':')[0] for link in links] - - def get_service_names_from_volumes_from(volumes_from): - return [volume_from.source for volume_from in volumes_from] - - def get_service_dependents(service_dict, services): - name = service_dict['name'] - return [ - service for service in services - if (name in get_service_names(service.get('links', [])) or - name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or - name == get_service_name_from_network_mode(service.get('network_mode')) or - name in service.get('depends_on', [])) - ] - def visit(n): if n['name'] in temporary_marked: if n['name'] in get_service_names(n.get('links', [])): diff --git a/compose/config/validation.py b/compose/config/validation.py index 60ee5c930..088bec3fc 100644 --- a/compose/config/validation.py +++ b/compose/config/validation.py @@ -14,6 +14,7 @@ from jsonschema import FormatChecker from jsonschema import RefResolver from jsonschema import ValidationError +from ..const import COMPOSEFILE_V1 as V1 from .errors import ConfigurationError from .errors import VERSION_EXPLANATION from .sort_services import get_service_name_from_network_mode @@ -62,23 +63,6 @@ def format_expose(instance): return True -@FormatChecker.cls_checks(format="bool-value-in-mapping") -def format_boolean_in_environment(instance): - """Check if there is a boolean in the mapping sections and display a warning. - Always return True here so the validation won't raise an error. - """ - if isinstance(instance, bool): - log.warn( - "There is a boolean value in the 'environment', 'labels', or " - "'extra_hosts' field of a service.\n" - "These sections only support string values.\n" - "Please add quotes to any boolean values to make them strings " - "(eg, 'True', 'false', 'yes', 'N', 'on', 'Off').\n" - "This warning will become an error in a future release. \r\n" - ) - return True - - def match_named_volumes(service_dict, project_volumes): service_volumes = service_dict.get('volumes', []) for volume_spec in service_volumes: @@ -209,7 +193,7 @@ def anglicize_json_type(json_type): def is_service_dict_schema(schema_id): - return schema_id == 'fields_schema_v1.json' or schema_id == '#/properties/services' + return schema_id in ('config_schema_v1.json', '#/properties/services') def handle_error_for_schema_with_id(error, path): @@ -221,35 +205,6 @@ def handle_error_for_schema_with_id(error, path): list(error.instance)[0], VALID_NAME_CHARS) - if schema_id == '#/definitions/constraints': - # Build context could in 'build' or 'build.context' and dockerfile could be - # in 'dockerfile' or 'build.dockerfile' - context = False - dockerfile = 'dockerfile' in error.instance - if 'build' in error.instance: - if isinstance(error.instance['build'], six.string_types): - context = True - else: - context = 'context' in error.instance['build'] - dockerfile = dockerfile or 'dockerfile' in error.instance['build'] - - # TODO: only applies to v1 - if 'image' in error.instance and context: - return ( - "{} has both an image and build path specified. " - "A service can either be built to image or use an existing " - "image, not both.".format(path_string(path))) - if 'image' not in error.instance and not context: - return ( - "{} has neither an image nor a build path specified. 
" - "At least one must be provided.".format(path_string(path))) - # TODO: only applies to v1 - if 'image' in error.instance and dockerfile: - return ( - "{} has both an image and alternate Dockerfile. " - "A service can either be built to image or use an existing " - "image, not both.".format(path_string(path))) - if error.validator == 'additionalProperties': if schema_id == '#/definitions/service': invalid_config_key = parse_key_from_error_msg(error) @@ -259,7 +214,7 @@ def handle_error_for_schema_with_id(error, path): return '{}\n{}'.format(error.message, VERSION_EXPLANATION) -def handle_generic_service_error(error, path): +def handle_generic_error(error, path): msg_format = None error_msg = error.message @@ -365,75 +320,94 @@ def _parse_oneof_validator(error): return (None, "contains an invalid type, it should be {}".format(valid_types)) -def process_errors(errors, path_prefix=None): - """jsonschema gives us an error tree full of information to explain what has +def process_service_constraint_errors(error, service_name, version): + if version == V1: + if 'image' in error.instance and 'build' in error.instance: + return ( + "Service {} has both an image and build path specified. " + "A service can either be built to image or use an existing " + "image, not both.".format(service_name)) + + if 'image' in error.instance and 'dockerfile' in error.instance: + return ( + "Service {} has both an image and alternate Dockerfile. " + "A service can either be built to image or use an existing " + "image, not both.".format(service_name)) + + if 'image' not in error.instance and 'build' not in error.instance: + return ( + "Service {} has neither an image nor a build context specified. " + "At least one must be provided.".format(service_name)) + + +def process_config_schema_errors(error): + path = list(error.path) + + if 'id' in error.schema: + error_msg = handle_error_for_schema_with_id(error, path) + if error_msg: + return error_msg + + return handle_generic_error(error, path) + + +def validate_against_config_schema(config_file): + schema = load_jsonschema(config_file.version) + format_checker = FormatChecker(["ports", "expose"]) + validator = Draft4Validator( + schema, + resolver=RefResolver(get_resolver_path(), schema), + format_checker=format_checker) + handle_errors( + validator.iter_errors(config_file.config), + process_config_schema_errors, + config_file.filename) + + +def validate_service_constraints(config, service_name, version): + def handler(errors): + return process_service_constraint_errors(errors, service_name, version) + + schema = load_jsonschema(version) + validator = Draft4Validator(schema['definitions']['constraints']['service']) + handle_errors(validator.iter_errors(config), handler, None) + + +def get_schema_path(): + return os.path.dirname(os.path.abspath(__file__)) + + +def load_jsonschema(version): + filename = os.path.join( + get_schema_path(), + "config_schema_v{0}.json".format(version)) + + with open(filename, "r") as fh: + return json.load(fh) + + +def get_resolver_path(): + schema_path = get_schema_path() + if sys.platform == "win32": + scheme = "///" + # TODO: why is this necessary? + schema_path = schema_path.replace('\\', '/') + else: + scheme = "//" + return "file:{}{}/".format(scheme, schema_path) + + +def handle_errors(errors, format_error_func, filename): + """jsonschema returns an error tree full of information to explain what has gone wrong. Process each error and pull out relevant information and re-write helpful error messages that are relevant. 
""" - path_prefix = path_prefix or [] - - def format_error_message(error): - path = path_prefix + list(error.path) - - if 'id' in error.schema: - error_msg = handle_error_for_schema_with_id(error, path) - if error_msg: - return error_msg - - return handle_generic_service_error(error, path) - - return '\n'.join(format_error_message(error) for error in errors) - - -def validate_against_fields_schema(config_file): - schema_filename = "fields_schema_v{0}.json".format(config_file.version) - _validate_against_schema( - config_file.config, - schema_filename, - format_checker=["ports", "expose", "bool-value-in-mapping"], - filename=config_file.filename) - - -def validate_against_service_schema(config, service_name, version): - _validate_against_schema( - config, - "service_schema_v{0}.json".format(version), - format_checker=["ports"], - path_prefix=[service_name]) - - -def _validate_against_schema( - config, - schema_filename, - format_checker=(), - path_prefix=None, - filename=None): - config_source_dir = os.path.dirname(os.path.abspath(__file__)) - - if sys.platform == "win32": - file_pre_fix = "///" - config_source_dir = config_source_dir.replace('\\', '/') - else: - file_pre_fix = "//" - - resolver_full_path = "file:{}{}/".format(file_pre_fix, config_source_dir) - schema_file = os.path.join(config_source_dir, schema_filename) - - with open(schema_file, "r") as schema_fh: - schema = json.load(schema_fh) - - resolver = RefResolver(resolver_full_path, schema) - validation_output = Draft4Validator( - schema, - resolver=resolver, - format_checker=FormatChecker(format_checker)) - - errors = [error for error in sorted(validation_output.iter_errors(config), key=str)] + errors = list(sorted(errors, key=str)) if not errors: return - error_msg = process_errors(errors, path_prefix=path_prefix) - file_msg = " in file '{}'".format(filename) if filename else '' - raise ConfigurationError("Validation failed{}, reason(s):\n{}".format( - file_msg, - error_msg)) + error_msg = '\n'.join(format_error_func(error) for error in errors) + raise ConfigurationError( + "Validation failed{file_msg}, reason(s):\n{error_msg}".format( + file_msg=" in file '{}'".format(filename) if filename else "", + error_msg=error_msg)) diff --git a/compose/const.py b/compose/const.py index db5e2fb4f..9e00d96e9 100644 --- a/compose/const.py +++ b/compose/const.py @@ -5,7 +5,7 @@ import os import sys DEFAULT_TIMEOUT = 10 -HTTP_TIMEOUT = int(os.environ.get('COMPOSE_HTTP_TIMEOUT', os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))) +HTTP_TIMEOUT = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60)) IMAGE_EVENTS = ['delete', 'import', 'pull', 'push', 'tag', 'untag'] IS_WINDOWS_PLATFORM = (sys.platform == "win32") LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number' diff --git a/compose/container.py b/compose/container.py index c96b63ef4..2c16863df 100644 --- a/compose/container.py +++ b/compose/container.py @@ -39,7 +39,7 @@ class Container(object): @classmethod def from_id(cls, client, id): - return cls(client, client.inspect_container(id)) + return cls(client, client.inspect_container(id), has_been_inspected=True) @classmethod def create(cls, client, **options): @@ -216,6 +216,12 @@ class Container(object): def remove(self, **options): return self.client.remove_container(self.id, **options) + def create_exec(self, command, **options): + return self.client.exec_create(self.id, command, **options) + + def start_exec(self, exec_id, **options): + return self.client.exec_start(exec_id, **options) + def rename_to_tmp_name(self): """Rename the 
container to a hopefully unique temporary container name
        by prepending the short id.
diff --git a/compose/network.py b/compose/network.py
index 135502cc0..affba7c2d 100644
--- a/compose/network.py
+++ b/compose/network.py
@@ -149,7 +149,10 @@ class ProjectNetworks(object):
         if not self.use_networking:
             return
         for network in self.networks.values():
-            network.remove()
+            try:
+                network.remove()
+            except NotFound:
+                log.warn("Network %s not found.", network.full_name)
 
     def initialize(self):
         if not self.use_networking:
@@ -159,26 +162,26 @@ class ProjectNetworks(object):
             network.ensure()
 
 
-def get_network_aliases_for_service(service_dict):
+def get_network_defs_for_service(service_dict):
     if 'network_mode' in service_dict:
         return {}
     networks = service_dict.get('networks', {'default': None})
     return dict(
-        (net, (config or {}).get('aliases', []))
+        (net, (config or {}))
         for net, config in networks.items()
     )
 
 
 def get_network_names_for_service(service_dict):
-    return get_network_aliases_for_service(service_dict).keys()
+    return get_network_defs_for_service(service_dict).keys()
 
 
 def get_networks(service_dict, network_definitions):
     networks = {}
-    for name, aliases in get_network_aliases_for_service(service_dict).items():
+    for name, netdef in get_network_defs_for_service(service_dict).items():
         network = network_definitions.get(name)
         if network:
-            networks[network.full_name] = aliases
+            networks[network.full_name] = netdef
         else:
             raise ConfigurationError(
                 'Service "{}" uses an undefined network "{}"'
diff --git a/compose/parallel.py b/compose/parallel.py
index b8415e5e5..63417dcb0 100644
--- a/compose/parallel.py
+++ b/compose/parallel.py
@@ -1,71 +1,186 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
+import logging
 import operator
 import sys
 from threading import Thread
 
 from docker.errors import APIError
+from six.moves import _thread as thread
 from six.moves.queue import Empty
 from six.moves.queue import Queue
 
+from compose.cli.signals import ShutdownException
 from compose.utils import get_output_stream
 
 
-def perform_operation(func, arg, callback, index):
-    try:
-        callback((index, func(arg)))
-    except Exception as e:
-        callback((index, e))
+log = logging.getLogger(__name__)
+
+STOP = object()
 
 
-def parallel_execute(objects, func, index_func, msg):
-    """For a given list of objects, call the callable passing in the first
-    object we give it.
+def parallel_execute(objects, func, get_name, msg, get_deps=None):
+    """Runs func on objects in parallel while ensuring that func is
+    run on an object only after it has been run on all its dependencies.
+
+    get_deps called on an object must return a collection of its dependencies.
+    get_name called on an object must return its name.
     """
     objects = list(objects)
     stream = get_output_stream(sys.stderr)
+
     writer = ParallelStreamWriter(stream, msg)
-
     for obj in objects:
-        writer.initialize(index_func(obj))
+        writer.initialize(get_name(obj))
 
-    q = Queue()
+    events = parallel_execute_iter(objects, func, get_deps)
 
-    # TODO: limit the number of threads #1828
-    for obj in objects:
-        t = Thread(
-            target=perform_operation,
-            args=(func, obj, q.put, index_func(obj)))
-        t.daemon = True
-        t.start()
-
-    done = 0
     errors = {}
+    results = []
+    error_to_reraise = None
+
+    for obj, result, exception in events:
+        if exception is None:
+            writer.write(get_name(obj), 'done')
+            results.append(result)
+        elif isinstance(exception, APIError):
+            errors[get_name(obj)] = exception.explanation
+            writer.write(get_name(obj), 'error')
+        elif isinstance(exception, UpstreamError):
+            writer.write(get_name(obj), 'error')
+        else:
+            errors[get_name(obj)] = exception
+            error_to_reraise = exception
+
+    for obj_name, error in errors.items():
+        stream.write("\nERROR: for {} {}\n".format(obj_name, error))
+
+    if error_to_reraise:
+        raise error_to_reraise
+
+    return results
+
+
+def _no_deps(x):
+    return []
+
+
+class State(object):
+    """
+    Holds the state of a partially-complete parallel operation.
+
+    state.started: objects being processed
+    state.finished: objects which have been processed
+    state.failed: objects which either failed or whose dependencies failed
+    """
+    def __init__(self, objects):
+        self.objects = objects
+
+        self.started = set()
+        self.finished = set()
+        self.failed = set()
+
+    def is_done(self):
+        return len(self.finished) + len(self.failed) >= len(self.objects)
+
+    def pending(self):
+        return set(self.objects) - self.started - self.finished - self.failed
+
+
+def parallel_execute_iter(objects, func, get_deps):
+    """
+    Runs func on objects in parallel while ensuring that func is
+    run on an object only after it has been run on all its dependencies.
+
+    Returns an iterator of tuples which look like:
+
+    # if func returned normally when run on object
+    (object, result, None)
+
+    # if func raised an exception when run on object
+    (object, None, exception)
+
+    # if func raised an exception when run on one of object's dependencies
+    (object, None, UpstreamError())
+    """
+    if get_deps is None:
+        get_deps = _no_deps
+
+    results = Queue()
+    state = State(objects)
+
+    while True:
+        feed_queue(objects, func, get_deps, results, state)
-    while done < len(objects):
         try:
-            msg_index, result = q.get(timeout=1)
+            event = results.get(timeout=0.1)
         except Empty:
             continue
+        # See https://github.com/docker/compose/issues/189
+        except thread.error:
+            raise ShutdownException()
 
-        if isinstance(result, APIError):
-            errors[msg_index] = "error", result.explanation
-            writer.write(msg_index, 'error')
-        elif isinstance(result, Exception):
-            errors[msg_index] = "unexpected_exception", result
+        if event is STOP:
+            break
+
+        obj, _, exception = event
+        if exception is None:
+            log.debug('Finished processing: {}'.format(obj))
+            state.finished.add(obj)
         else:
-            writer.write(msg_index, 'done')
-            done += 1
+            log.debug('Failed: {}'.format(obj))
+            state.failed.add(obj)
 
-    if not errors:
-        return
+        yield event
 
-    stream.write("\n")
-    for msg_index, (result, error) in errors.items():
-        stream.write("ERROR: for {} {} \n".format(msg_index, error))
-        if result == 'unexpected_exception':
-            raise error
+
+def producer(obj, func, results):
+    """
+    The entry point for a producer thread which runs func on a single object.
+    Places a tuple on the results queue once func has either returned or raised.
+    """
+    try:
+        result = func(obj)
+        results.put((obj, result, None))
+    except Exception as e:
+        results.put((obj, None, e))
+
+
+def feed_queue(objects, func, get_deps, results, state):
+    """
+    Starts producer threads for any objects which are ready to be processed
+    (i.e. they have no dependencies which haven't been successfully processed).
+
+    Shortcuts any objects whose dependencies have failed and places an
+    (object, None, UpstreamError()) tuple on the results queue.
+    """
+    pending = state.pending()
+    log.debug('Pending: {}'.format(pending))
+
+    for obj in pending:
+        deps = get_deps(obj)
+
+        if any(dep in state.failed for dep in deps):
+            log.debug('{} has upstream errors - not processing'.format(obj))
+            results.put((obj, None, UpstreamError()))
+            state.failed.add(obj)
+        elif all(
+            dep not in objects or dep in state.finished
+            for dep in deps
+        ):
+            log.debug('Starting producer thread for {}'.format(obj))
+            t = Thread(target=producer, args=(obj, func, results))
+            t.daemon = True
+            t.start()
+            state.started.add(obj)
+
+    if state.is_done():
+        results.put(STOP)
+
+
+class UpstreamError(Exception):
+    pass
 
 
 class ParallelStreamWriter(object):
@@ -81,11 +196,15 @@ class ParallelStreamWriter(object):
         self.lines = []
 
     def initialize(self, obj_index):
+        if self.msg is None:
+            return
         self.lines.append(obj_index)
         self.stream.write("{} {} ... \r\n".format(self.msg, obj_index))
         self.stream.flush()
 
     def write(self, obj_index, status):
+        if self.msg is None:
+            return
         position = self.lines.index(obj_index)
         diff = len(self.lines) - position
         # move up
@@ -111,10 +230,6 @@ def parallel_remove(containers, options):
     parallel_operation(stopped_containers, 'remove', options, 'Removing')
 
 
-def parallel_stop(containers, options):
-    parallel_operation(containers, 'stop', options, 'Stopping')
-
-
 def parallel_start(containers, options):
     parallel_operation(containers, 'start', options, 'Starting')
 
diff --git a/compose/project.py b/compose/project.py
index cfb11aa05..0d891e455 100644
--- a/compose/project.py
+++ b/compose/project.py
@@ -3,8 +3,10 @@ from __future__ import unicode_literals
 
 import datetime
 import logging
+import operator
 from functools import reduce
 
+import enum
 from docker.errors import APIError
 
 from . import parallel
@@ -21,6 +23,7 @@ from .container import Container
 from .network import build_networks
 from .network import get_networks
 from .network import ProjectNetworks
+from .service import BuildAction
 from .service import ContainerNetworkMode
 from .service import ConvergenceStrategy
 from .service import NetworkMode
@@ -33,6 +36,24 @@ from .volume import ProjectVolumes
 
 log = logging.getLogger(__name__)
 
 
+@enum.unique
+class OneOffFilter(enum.Enum):
+    include = 0
+    exclude = 1
+    only = 2
+
+    @classmethod
+    def update_labels(cls, value, labels):
+        if value == cls.only:
+            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True"))
+        elif value == cls.exclude:
+            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False"))
+        elif value == cls.include:
+            pass
+        else:
+            raise ValueError("Invalid value for one_off: {}".format(repr(value)))
+
+
 class Project(object):
     """
     A collection of services.
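The rewritten compose/parallel.py above is the engine behind the new parallel behaviour: an object is only processed once all of its dependencies have finished. A minimal, self-contained usage sketch (the `Stub` class and its attributes are hypothetical stand-ins for compose Service objects, for illustration only):

    import operator

    from compose.parallel import parallel_execute

    class Stub(object):
        # Hypothetical stand-in for a compose Service.
        def __init__(self, name, deps=()):
            self.name, self.deps = name, deps

        def start(self):
            return 'started {}'.format(self.name)

    db, web = Stub('db'), Stub('web', deps=('db',))
    by_name = {s.name: s for s in (db, web)}

    results = parallel_execute(
        [db, web],                       # objects to process
        lambda s: s.start(),             # func, run once per object
        operator.attrgetter('name'),     # get_name, used for progress output
        'Starting',                      # msg prefix written to stderr
        lambda s: {by_name[d] for d in s.deps})  # get_deps gates ordering

Here `db` has no dependencies and runs first; `web` is held back until `db` has finished, and `results` collects the return values of `func`.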
@@ -44,11 +65,11 @@ class Project(object):
         self.volumes = volumes or ProjectVolumes({})
         self.networks = networks or ProjectNetworks({}, False)
 
-    def labels(self, one_off=False):
-        return [
-            '{0}={1}'.format(LABEL_PROJECT, self.name),
-            '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
-        ]
+    def labels(self, one_off=OneOffFilter.exclude):
+        labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
+
+        OneOffFilter.update_labels(one_off, labels)
+        return labels
 
     @classmethod
     def from_config(cls, name, config_data, client):
@@ -199,13 +220,40 @@ class Project(object):
     def start(self, service_names=None, **options):
         containers = []
-        for service in self.get_services(service_names):
-            service_containers = service.start(**options)
+
+        def start_service(service):
+            service_containers = service.start(quiet=True, **options)
             containers.extend(service_containers)
+
+        services = self.get_services(service_names)
+
+        def get_deps(service):
+            return {self.get_service(dep) for dep in service.get_dependency_names()}
+
+        parallel.parallel_execute(
+            services,
+            start_service,
+            operator.attrgetter('name'),
+            'Starting',
+            get_deps)
+
         return containers
 
-    def stop(self, service_names=None, **options):
-        parallel.parallel_stop(self.containers(service_names), options)
+    def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options):
+        containers = self.containers(service_names, one_off=one_off)
+
+        def get_deps(container):
+            # actually returning inverted dependencies
+            return {other for other in containers
+                    if container.service in
+                    self.get_service(other.service).get_dependency_names()}
+
+        parallel.parallel_execute(
+            containers,
+            operator.methodcaller('stop', **options),
+            operator.attrgetter('name'),
+            'Stopping',
+            get_deps)
 
     def pause(self, service_names=None, **options):
         containers = self.containers(service_names)
@@ -220,12 +268,16 @@ class Project(object):
     def kill(self, service_names=None, **options):
         parallel.parallel_kill(self.containers(service_names), options)
 
-    def remove_stopped(self, service_names=None, **options):
-        parallel.parallel_remove(self.containers(service_names, stopped=True), options)
+    def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options):
+        parallel.parallel_remove(self.containers(
+            service_names, stopped=True, one_off=one_off
+        ), options)
+
+    def down(self, remove_image_type, include_volumes, remove_orphans=False):
+        self.stop(one_off=OneOffFilter.include)
+        self.find_orphan_containers(remove_orphans)
+        self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
 
-    def down(self, remove_image_type, include_volumes):
-        self.stop()
-        self.remove_stopped(v=include_volumes)
         self.networks.remove()
 
         if include_volumes:
@@ -249,19 +301,25 @@ class Project(object):
         else:
             log.info('%s uses an image, skipping' % service.name)
 
-    def create(self, service_names=None, strategy=ConvergenceStrategy.changed, do_build=True):
+    def create(
+        self,
+        service_names=None,
+        strategy=ConvergenceStrategy.changed,
+        do_build=BuildAction.none,
+    ):
         services = self.get_services_without_duplicate(service_names, include_deps=True)
 
+        for svc in services:
+            svc.ensure_image_exists(do_build=do_build)
         plans = self._get_convergence_plans(services, strategy)
 
         for service in services:
             service.execute_convergence_plan(
                 plans[service.name],
-                do_build,
                 detached=True,
                 start=False)
 
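Because `stop` hands `parallel_execute` the inverted dependency set, shutdown order is now the reverse of startup order. A worked illustration (assumed Compose file where `web` declares `depends_on: [db]`; not captured output):

    # web depends on db, so the dependency-aware runner yields:
    #
    #   docker-compose up    -> db is created/started before web
    #   docker-compose stop  -> web is stopped before db
    #
    # In stop(), a container's "deps" are the containers that depend on
    # it, so dependents are always stopped before their dependencies.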
-    def events(self):
+    def events(self, service_names=None):
         def build_container_event(event, container):
             time = datetime.datetime.fromtimestamp(event['time'])
             time = time.replace(
@@ -275,10 +333,11 @@
             'attributes': {
                 'name': container.name,
                 'image': event['from'],
-            }
+            },
+            'container': container,
         }
 
-        service_names = set(self.service_names)
+        service_names = set(service_names or self.service_names)
         for event in self.client.events(
             filters={'label': self.labels()},
             decode=True
@@ -289,7 +348,11 @@
                 continue
 
             # TODO: get labels from the API v1.22 , see github issue 2618
-            container = Container.from_id(self.client, event['id'])
+            try:
+                # this can fail if the container has been removed
+                container = Container.from_id(self.client, event['id'])
+            except APIError:
+                continue
             if container.service not in service_names:
                 continue
             yield build_container_event(event, container)
@@ -298,25 +361,44 @@
             service_names=None,
             start_deps=True,
             strategy=ConvergenceStrategy.changed,
-            do_build=True,
+            do_build=BuildAction.none,
             timeout=DEFAULT_TIMEOUT,
-            detached=False):
+            detached=False,
+            remove_orphans=False):
 
         self.initialize()
+        self.find_orphan_containers(remove_orphans)
+
         services = self.get_services_without_duplicate(
             service_names,
             include_deps=start_deps)
 
+        for svc in services:
+            svc.ensure_image_exists(do_build=do_build)
         plans = self._get_convergence_plans(services, strategy)
 
-        return [
-            container
-            for service in services
-            for container in service.execute_convergence_plan(
+        def do(service):
+            return service.execute_convergence_plan(
                 plans[service.name],
-                do_build=do_build,
                 timeout=timeout,
                 detached=detached
             )
+
+        def get_deps(service):
+            return {self.get_service(dep) for dep in service.get_dependency_names()}
+
+        results = parallel.parallel_execute(
+            services,
+            do,
+            operator.attrgetter('name'),
+            None,
+            get_deps
+        )
+        return [
+            container
+            for svc_containers in results
+            if svc_containers is not None
+            for container in svc_containers
         ]
 
     def initialize(self):
@@ -350,23 +432,52 @@
         for service in self.get_services(service_names, include_deps=False):
             service.pull(ignore_pull_failures)
 
-    def containers(self, service_names=None, stopped=False, one_off=False):
+    def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
+        return list(filter(None, [
+            Container.from_ps(self.client, container)
+            for container in self.client.containers(
+                all=stopped,
+                filters={'label': self.labels(one_off=one_off)})])
+        )
+
+    def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
         if service_names:
             self.validate_service_names(service_names)
         else:
             service_names = self.service_names
 
-        containers = list(filter(None, [
-            Container.from_ps(self.client, container)
-            for container in self.client.containers(
-                all=stopped,
-                filters={'label': self.labels(one_off=one_off)})]))
+        containers = self._labeled_containers(stopped, one_off)
 
         def matches_service_names(container):
             return container.labels.get(LABEL_SERVICE) in service_names
 
         return [c for c in containers if matches_service_names(c)]
 
+    def find_orphan_containers(self, remove_orphans):
+        def _find():
+            containers = self._labeled_containers()
+            for ctnr in containers:
+                service_name = ctnr.labels.get(LABEL_SERVICE)
+                if service_name not in self.service_names:
+                    yield ctnr
+        orphans = list(_find())
+        if not orphans:
+            return
+        if remove_orphans:
+            for ctnr in orphans:
+                log.info('Removing orphan container "{0}"'.format(ctnr.name))
+                ctnr.kill()
+                ctnr.remove(force=True)
+        else:
+            log.warning(
+                'Found orphan containers ({0}) for this project. If '
+                'you removed or renamed this service in your compose '
+                'file, you can run this command with the '
+                '--remove-orphans flag to clean it up.'.format(
+                    ', '.join(["{}".format(ctnr.name) for ctnr in orphans])
+                )
+            )
+
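`find_orphan_containers` above backs the new `--remove-orphans` flag: a container counts as an orphan when it still carries this project's label but its service label no longer matches any service in the Compose file. A simplified sketch of that test, given a `Project` instance `project` (it leans on the private `_labeled_containers` helper, for illustration only):

    from compose.const import LABEL_SERVICE

    current_services = set(project.service_names)
    orphans = [
        ctnr for ctnr in project._labeled_containers()  # containers labeled with this project
        if ctnr.labels.get(LABEL_SERVICE) not in current_services
    ]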
If ' + 'you removed or renamed this service in your compose ' + 'file, you can run this command with the ' + '--remove-orphans flag to clean it up.'.format( + ', '.join(["{}".format(ctnr.name) for ctnr in orphans]) + ) + ) + def _inject_deps(self, acc, service): dep_names = service.get_dependency_names() diff --git a/compose/service.py b/compose/service.py index 4e169daae..e0f238882 100644 --- a/compose/service.py +++ b/compose/service.py @@ -40,6 +40,7 @@ DOCKER_START_KEYS = [ 'cap_add', 'cap_drop', 'cgroup_parent', + 'cpu_quota', 'devices', 'dns', 'dns_search', @@ -54,9 +55,9 @@ DOCKER_START_KEYS = [ 'pid', 'privileged', 'restart', - 'volumes_from', 'security_opt', - 'cpu_quota', + 'shm_size', + 'volumes_from', ] @@ -103,6 +104,14 @@ class ImageType(enum.Enum): all = 2 +@enum.unique +class BuildAction(enum.Enum): + """Enumeration for the possible build actions.""" + none = 0 + force = 1 + skip = 2 + + class Service(object): def __init__( self, @@ -126,6 +135,9 @@ class Service(object): self.networks = networks or {} self.options = options + def __repr__(self): + return ''.format(self.name) + def containers(self, stopped=False, one_off=False, filters={}): filters.update({'label': self.labels(one_off=one_off)}) @@ -161,11 +173,11 @@ class Service(object): - starts containers until there are at least `desired_num` running - removes all stopped containers """ - if self.custom_container_name() and desired_num > 1: + if self.custom_container_name and desired_num > 1: log.warn('The "%s" service is using the custom container name "%s". ' 'Docker requires each container to have a unique name. ' 'Remove the custom name to scale the service.' - % (self.name, self.custom_container_name())) + % (self.name, self.custom_container_name)) if self.specifies_host_port(): log.warn('The "%s" service specifies a port on the host. If multiple containers ' @@ -195,7 +207,9 @@ class Service(object): if num_running != len(all_containers): # we have some stopped containers, let's start them up again - stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number')) + stopped_containers = sorted( + (c for c in all_containers if not c.is_running), + key=attrgetter('number')) num_stopped = len(stopped_containers) @@ -220,7 +234,7 @@ class Service(object): parallel_execute( container_numbers, lambda n: create_and_start(service=self, number=n), - lambda n: n, + lambda n: self.get_container_name(n), "Creating and starting" ) @@ -240,7 +254,6 @@ class Service(object): def create_container(self, one_off=False, - do_build=True, previous_container=None, number=None, quiet=False, @@ -249,7 +262,9 @@ class Service(object): Create a container for this service. If the image doesn't exist, attempt to pull it. """ - self.ensure_image_exists(do_build=do_build) + # This is only necessary for `scale` and `volumes_from` + # auto-creating containers to satisfy the dependency. 
+ self.ensure_image_exists() container_options = self._get_container_create_options( override_options, @@ -263,20 +278,29 @@ class Service(object): return Container.create(self.client, **container_options) - def ensure_image_exists(self, do_build=True): + def ensure_image_exists(self, do_build=BuildAction.none): + if self.can_be_built() and do_build == BuildAction.force: + self.build() + return + try: self.image() return except NoSuchImageError: pass - if self.can_be_built(): - if do_build: - self.build() - else: - raise NeedsBuildError(self) - else: + if not self.can_be_built(): self.pull() + return + + if do_build == BuildAction.skip: + raise NeedsBuildError(self) + + self.build() + log.warn( + "Image for service {} was built because it did not already exist. To " + "rebuild this image you must use `docker-compose build` or " + "`docker-compose up --build`.".format(self.name)) def image(self): try: @@ -340,7 +364,6 @@ class Service(object): def execute_convergence_plan(self, plan, - do_build=True, timeout=DEFAULT_TIMEOUT, detached=False, start=True): @@ -348,7 +371,7 @@ class Service(object): should_attach_logs = not detached if action == 'create': - container = self.create_container(do_build=do_build) + container = self.create_container() if should_attach_logs: container.attach_log_stream() @@ -362,7 +385,6 @@ class Service(object): return [ self.recreate_container( container, - do_build=do_build, timeout=timeout, attach_logs=should_attach_logs, start_new_container=start @@ -389,7 +411,6 @@ class Service(object): def recreate_container( self, container, - do_build=False, timeout=DEFAULT_TIMEOUT, attach_logs=False, start_new_container=True): @@ -404,7 +425,6 @@ class Service(object): container.stop(timeout=timeout) container.rename_to_tmp_name() new_container = self.create_container( - do_build=do_build, previous_container=container, number=container.labels.get(LABEL_CONTAINER_NUMBER), quiet=True, @@ -416,9 +436,10 @@ class Service(object): container.remove() return new_container - def start_container_if_stopped(self, container, attach_logs=False): + def start_container_if_stopped(self, container, attach_logs=False, quiet=False): if not container.is_running: - log.info("Starting %s" % container.name) + if not quiet: + log.info("Starting %s" % container.name) if attach_logs: container.attach_log_stream() return self.start_container(container) @@ -431,7 +452,10 @@ class Service(object): def connect_container_to_networks(self, container): connected_networks = container.get('NetworkSettings.Networks') - for network, aliases in self.networks.items(): + for network, netdefs in self.networks.items(): + aliases = netdefs.get('aliases', []) + ipv4_address = netdefs.get('ipv4_address', None) + ipv6_address = netdefs.get('ipv6_address', None) if network in connected_networks: self.client.disconnect_container_from_network( container.id, network) @@ -439,7 +463,9 @@ class Service(object): self.client.connect_container_to_network( container.id, network, aliases=list(self._get_aliases(container).union(aliases)), - links=self._get_links(False), + ipv4_address=ipv4_address, + ipv6_address=ipv6_address, + links=self._get_links(False) ) def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT): @@ -472,7 +498,7 @@ class Service(object): 'image_id': self.image()['Id'], 'links': self.get_link_names(), 'net': self.network_mode.id, - 'networks': list(self.networks.keys()), + 'networks': self.networks, 'volumes_from': [ (v.source.name, v.mode) for v in self.volumes_from if isinstance(v.source, Service) @@ 
-495,10 +521,6 @@ class Service(object): def get_volumes_from_names(self): return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)] - def get_container_name(self, number, one_off=False): - # TODO: Implement issue #652 here - return build_container_name(self.project, self.name, number, one_off) - # TODO: this would benefit from github.com/docker/docker/pull/14699 # to remove the need to inspect every container def _next_container_number(self, one_off=False): @@ -560,13 +582,10 @@ class Service(object): for k in DOCKER_CONFIG_KEYS if k in self.options) container_options.update(override_options) - if self.custom_container_name() and not one_off: - container_options['name'] = self.custom_container_name() - elif not container_options.get('name'): + if not container_options.get('name'): container_options['name'] = self.get_container_name(number, one_off) - if 'detach' not in container_options: - container_options['detach'] = True + container_options.setdefault('detach', True) # If a qualified hostname was given, split it into an # unqualified hostname and a domainname unless domainname @@ -580,16 +599,9 @@ class Service(object): container_options['domainname'] = parts[2] if 'ports' in container_options or 'expose' in self.options: - ports = [] - all_ports = container_options.get('ports', []) + self.options.get('expose', []) - for port_range in all_ports: - internal_range, _ = split_port(port_range) - for port in internal_range: - port = str(port) - if '/' in port: - port = tuple(port.split('/')) - ports.append(port) - container_options['ports'] = ports + container_options['ports'] = build_container_ports( + container_options, + self.options) container_options['environment'] = merge_environment( self.options.get('environment'), @@ -655,6 +667,8 @@ class Service(object): ipc_mode=options.get('ipc'), cgroup_parent=options.get('cgroup_parent'), cpu_quota=options.get('cpu_quota'), + shm_size=options.get('shm_size'), + tmpfs=options.get('tmpfs'), ) def build(self, no_cache=False, pull=False, force_rm=False): @@ -712,9 +726,16 @@ class Service(object): '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False") ] + @property def custom_container_name(self): return self.options.get('container_name') + def get_container_name(self, number, one_off=False): + if self.custom_container_name and not one_off: + return self.custom_container_name + + return build_container_name(self.project, self.name, number, one_off) + def remove_image(self, image_type): if not image_type or image_type == ImageType.none: return False @@ -1029,3 +1050,18 @@ def format_environment(environment): return key return '{key}={value}'.format(key=key, value=value) return [format_env(*item) for item in environment.items()] + +# Ports + + +def build_container_ports(container_options, options): + ports = [] + all_ports = container_options.get('ports', []) + options.get('expose', []) + for port_range in all_ports: + internal_range, _ = split_port(port_range) + for port in internal_range: + port = str(port) + if '/' in port: + port = tuple(port.split('/')) + ports.append(port) + return ports diff --git a/compose/volume.py b/compose/volume.py index 26fbda96f..f440ba40c 100644 --- a/compose/volume.py +++ b/compose/volume.py @@ -3,7 +3,6 @@ from __future__ import unicode_literals import logging -from docker.errors import APIError from docker.errors import NotFound from .config import ConfigurationError @@ -77,17 +76,21 @@ class ProjectVolumes(object): def remove(self): for volume in self.volumes.values(): - 
volume.remove() + try: + volume.remove() + except NotFound: + log.warn("Volume %s not found.", volume.full_name) def initialize(self): try: for volume in self.volumes.values(): + volume_exists = volume.exists() if volume.external: log.debug( 'Volume {0} declared as external. No new ' 'volume will be created.'.format(volume.name) ) - if not volume.exists(): + if not volume_exists: raise ConfigurationError( 'Volume {name} declared as external, but could' ' not be found. Please create the volume manually' @@ -97,28 +100,32 @@ class ProjectVolumes(object): ) ) continue - log.info( - 'Creating volume "{0}" with {1} driver'.format( - volume.full_name, volume.driver or 'default' + + if not volume_exists: + log.info( + 'Creating volume "{0}" with {1} driver'.format( + volume.full_name, volume.driver or 'default' + ) ) - ) - volume.create() + volume.create() + else: + driver = volume.inspect()['Driver'] + if volume.driver is not None and driver != volume.driver: + raise ConfigurationError( + 'Configuration for volume {0} specifies driver ' + '{1}, but a volume with the same name uses a ' + 'different driver ({3}). If you wish to use the ' + 'new configuration, please remove the existing ' + 'volume "{2}" first:\n' + '$ docker volume rm {2}'.format( + volume.name, volume.driver, volume.full_name, + volume.inspect()['Driver'] + ) + ) except NotFound: raise ConfigurationError( 'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver) ) - except APIError as e: - if 'Choose a different volume name' in str(e): - raise ConfigurationError( - 'Configuration for volume {0} specifies driver {1}, but ' - 'a volume with the same name uses a different driver ' - '({3}). If you wish to use the new configuration, please ' - 'remove the existing volume "{2}" first:\n' - '$ docker volume rm {2}'.format( - volume.name, volume.driver, volume.full_name, - volume.inspect()['Driver'] - ) - ) def namespace_spec(self, volume_spec): if not volume_spec.is_named_volume: diff --git a/contrib/completion/bash/docker-compose b/contrib/completion/bash/docker-compose index 3b135311a..66747fbd5 100644 --- a/contrib/completion/bash/docker-compose +++ b/contrib/completion/bash/docker-compose @@ -18,7 +18,22 @@ __docker_compose_q() { - docker-compose 2>/dev/null ${compose_file:+-f $compose_file} ${compose_project:+-p $compose_project} "$@" + docker-compose 2>/dev/null $daemon_options "$@" +} + +# Transforms a multiline list of strings into a single line string +# with the words separated by "|". +__docker_compose_to_alternatives() { + local parts=( $1 ) + local IFS='|' + echo "${parts[*]}" +} + +# Transforms a multiline list of options into an extglob pattern +# suitable for use in case statements. +__docker_compose_to_extglob() { + local extglob=$( __docker_compose_to_alternatives "$1" ) + echo "@($extglob)" } # suppress trailing whitespace @@ -27,20 +42,6 @@ __docker_compose_nospace() { type compopt &>/dev/null && compopt -o nospace } -# For compatibility reasons, Compose and therefore its completion supports several -# stack compositon files as listed here, in descending priority. -# Support for these filenames might be dropped in some future version. -__docker_compose_compose_file() { - local file - for file in docker-compose.y{,a}ml ; do - [ -e $file ] && { - echo $file - return - } - done - echo docker-compose.yml -} - # Extracts all service names from the compose file. 
___docker_compose_all_services_in_compose_file() { __docker_compose_q config --services @@ -127,18 +128,22 @@ _docker_compose_create() { _docker_compose_docker_compose() { case "$prev" in + --tlscacert|--tlscert|--tlskey) + _filedir + return + ;; --file|-f) _filedir "y?(a)ml" return ;; - --project-name|-p) + $(__docker_compose_to_extglob "$daemon_options_with_args") ) return ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "--file -f --help -h --project-name -p --verbose --version -v" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "$daemon_boolean_options $daemon_options_with_args --help -h --verbose --version -v" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) @@ -157,7 +162,7 @@ _docker_compose_down() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help --rmi --volumes -v" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--help --rmi --volumes -v --remove-orphans" -- "$cur" ) ) ;; esac } @@ -181,6 +186,24 @@ _docker_compose_events() { } +_docker_compose_exec() { + case "$prev" in + --index|--user) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-d --help --index --privileged -T --user" -- "$cur" ) ) + ;; + *) + __docker_compose_services_running + ;; + esac +} + + _docker_compose_help() { COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) } @@ -206,9 +229,15 @@ _docker_compose_kill() { _docker_compose_logs() { + case "$prev" in + --tail) + return + ;; + esac + case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help --no-color" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--follow -f --help --no-color --tail --timestamps -t" -- "$cur" ) ) ;; *) __docker_compose_services_all @@ -296,7 +325,7 @@ _docker_compose_restart() { _docker_compose_rm() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--force -f --help -v" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--all -a --force -f --help -v" -- "$cur" ) ) ;; *) __docker_compose_services_stopped @@ -312,14 +341,14 @@ _docker_compose_run() { __docker_compose_nospace return ;; - --entrypoint|--name|--user|-u) + --entrypoint|--name|--user|-u|--workdir|-w) return ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u --workdir -w" -- "$cur" ) ) ;; *) __docker_compose_services_all @@ -402,7 +431,7 @@ _docker_compose_up() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--abort-on-container-exit -d --force-recreate --help --no-build --no-color --no-deps --no-recreate --timeout -t" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--abort-on-container-exit --build -d --force-recreate --help --no-build --no-color --no-deps --no-recreate --timeout -t --remove-orphans" -- "$cur" ) ) ;; *) __docker_compose_services_all @@ -430,6 +459,7 @@ _docker_compose() { create down events + exec help kill logs @@ -448,6 +478,22 @@ _docker_compose() { version ) + # options for the docker daemon that have to be passed to secondary calls to + # docker-compose executed by this script + local daemon_boolean_options=" + --skip-hostname-check + --tls + --tlsverify + " + local daemon_options_with_args=" + --file -f + --host -H + --project-name -p + --tlscacert + --tlscert + --tlskey + " + COMPREPLY=() local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword @@ -455,17 +501,19 @@ _docker_compose() { # search subcommand and invoke its handler. 
# special treatment of some top-level options local command='docker_compose' + local daemon_options=() local counter=1 - local compose_file compose_project + while [ $counter -lt $cword ]; do case "${words[$counter]}" in - --file|-f) - (( counter++ )) - compose_file="${words[$counter]}" + $(__docker_compose_to_extglob "$daemon_boolean_options") ) + local opt=${words[counter]} + daemon_options+=($opt) ;; - --project-name|p) - (( counter++ )) - compose_project="${words[$counter]}" + $(__docker_compose_to_extglob "$daemon_options_with_args") ) + local opt=${words[counter]} + local arg=${words[++counter]} + daemon_options+=($opt $arg) ;; -*) ;; diff --git a/contrib/completion/zsh/_docker-compose b/contrib/completion/zsh/_docker-compose index f67bc9f64..ec9cb682f 100644 --- a/contrib/completion/zsh/_docker-compose +++ b/contrib/completion/zsh/_docker-compose @@ -223,6 +223,18 @@ __docker-compose_subcommand() { '--json[Output events as a stream of json objects.]' \ '*:services:__docker-compose_services_all' && ret=0 ;; + (exec) + _arguments \ + $opts_help \ + '-d[Detached mode: Run command in the background.]' \ + '--privileged[Give extended privileges to the process.]' \ + '--user=[Run the command as this user.]:username:_users' \ + '-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \ + '--index=[Index of the container if there are multiple instances of a service (default: 1)]:index: ' \ + '(-):running services:__docker-compose_runningservices' \ + '(-):command: _command_names -e' \ + '*::arguments: _normal' && ret=0 + ;; (help) _arguments ':subcommand:__docker-compose_commands' && ret=0 ;; @@ -235,7 +247,10 @@ __docker-compose_subcommand() { (logs) _arguments \ $opts_help \ + '(-f --follow)'{-f,--follow}'[Follow log output]' \ '--no-color[Produce monochrome output.]' \ + '--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \ + '(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \ '*:services:__docker-compose_services_all' && ret=0 ;; (pause) @@ -266,6 +281,7 @@ __docker-compose_subcommand() { (rm) _arguments \ $opts_help \ + '(-a --all)'{-a,--all}"[Also remove one-off containers]" \ '(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \ '-v[Remove volumes associated with containers]' \ '*:stopped services:__docker-compose_stoppedservices' && ret=0 @@ -274,15 +290,16 @@ __docker-compose_subcommand() { _arguments \ $opts_help \ '-d[Detached mode: Run container in the background, print new container name.]' \ - '--name[Assign a name to the container]:name: ' \ - '--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \ '*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \ - '(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \ + '--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \ + '--name[Assign a name to the container]:name: ' \ "--no-deps[Don't start linked services.]" \ + '(-p --publish)'{-p,--publish=-}"[Run command with manually mapped container's port(s) to the host.]" \ '--rm[Remove container after run. Ignored in detached mode.]' \ "--service-ports[Run command with the service's ports enabled and mapped to the host.]" \ - '(-p --publish)'{-p,--publish=-}"[Run command with manually mapped container's port(s) to the host.]" \ '-T[Disable pseudo-tty allocation. 
By default `docker-compose run` allocates a TTY.]' \ + '(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \ + '(-w --workdir)'{-w=,--workdir=}'[Working directory inside the container]:workdir: ' \ '(-):services:__docker-compose_services' \ '(-):command: _command_names -e' \ '*::arguments: _normal' && ret=0 @@ -313,6 +330,7 @@ __docker-compose_subcommand() { _arguments \ $opts_help \ '(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names.]' \ + '--build[Build images before starting containers.]' \ '--no-color[Produce monochrome output.]' \ "--no-deps[Don't start linked services.]" \ "--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \ diff --git a/docker-compose.spec b/docker-compose.spec index b3d8db399..3a165dd67 100644 --- a/docker-compose.spec +++ b/docker-compose.spec @@ -18,23 +18,13 @@ exe = EXE(pyz, a.datas, [ ( - 'compose/config/fields_schema_v1.json', - 'compose/config/fields_schema_v1.json', + 'compose/config/config_schema_v1.json', + 'compose/config/config_schema_v1.json', 'DATA' ), ( - 'compose/config/fields_schema_v2.0.json', - 'compose/config/fields_schema_v2.0.json', - 'DATA' - ), - ( - 'compose/config/service_schema_v1.json', - 'compose/config/service_schema_v1.json', - 'DATA' - ), - ( - 'compose/config/service_schema_v2.0.json', - 'compose/config/service_schema_v2.0.json', + 'compose/config/config_schema_v2.0.json', + 'compose/config/config_schema_v2.0.json', 'DATA' ), ( diff --git a/docs/Dockerfile b/docs/Dockerfile index 5f32dc4dc..b16d0d2c3 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -10,6 +10,7 @@ RUN svn checkout https://github.com/docker/kitematic/trunk/docs /docs/content/ki RUN svn checkout https://github.com/docker/toolbox/trunk/docs /docs/content/toolbox RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/project + ENV PROJECT=compose # To get the git info for this repo COPY . /src diff --git a/docs/compose-file.md b/docs/compose-file.md index 97b8ba51f..e9ec0a2de 100644 --- a/docs/compose-file.md +++ b/docs/compose-file.md @@ -59,6 +59,14 @@ optionally [dockerfile](#dockerfile) and [args](#args). args: buildno: 1 +If you specify `image` as well as `build`, then Compose tags the built image +with the tag specified in `image`: + + build: ./dir + image: webapp + +This will result in an image tagged `webapp`, built from `./dir`. + > **Note**: In the [version 1 file format](#version-1), `build` is different in > two ways: > @@ -95,13 +103,13 @@ specified. > **Note**: In the [version 1 file format](#version-1), `dockerfile` is > different in two ways: -> -> - It appears alongside `build`, not as a sub-option: -> -> build: . -> dockerfile: Dockerfile-alternate -> - Using `dockerfile` together with `image` is not allowed. Attempting to do -> so results in an error. + + * It appears alongside `build`, not as a sub-option: + + build: . + dockerfile: Dockerfile-alternate + + * Using `dockerfile` together with `image` is not allowed. Attempting to do so results in an error. #### args @@ -195,6 +203,11 @@ Simple example: db: image: postgres +> **Note:** `depends_on` will not wait for `db` and `redis` to be "ready" before +> starting `web` - only until they have been started. If you need to wait +> for a service to be ready, see [Controlling startup order](startup-order.md) +> for more on this problem and strategies for solving it. + ### dns Custom DNS servers. 
Can be a single value or a list. @@ -213,6 +226,15 @@ Custom DNS search domains. Can be a single value or a list. - dc1.example.com - dc2.example.com +### tmpfs + +Mount a temporary file system inside the container. Can be a single value or a list. + + tmpfs: /run + tmpfs: + - /run + - /tmp + ### entrypoint Override the default entrypoint. @@ -340,13 +362,22 @@ An entry with the ip address and hostname will be created in `/etc/hosts` inside ### image -Tag or partial image ID. Can be local or remote - Compose will attempt to -pull if it doesn't exist locally. +Specify the image to start the container from. Can either be a repository/tag or +a partial image ID. - image: ubuntu - image: orchardup/postgresql + image: redis + image: ubuntu:14.04 + image: tutum/influxdb + image: example-registry.com:4000/postgresql image: a4bc65fd +If the image does not exist, Compose attempts to pull it, unless you have also +specified [build](#build), in which case it builds it using the specified +options and tags it with the specified tag. + +> **Note**: In the [version 1 file format](#version-1), using `build` together +> with `image` is not allowed. Attempting to do so results in an error. + ### labels Add metadata to containers using [Docker labels](https://docs.docker.com/engine/userguide/labels-custom-metadata/). You can use either an array or a dictionary. @@ -496,7 +527,7 @@ The general format is shown here. In the example below, three services are provided (`web`, `worker`, and `db`), along with two networks (`new` and `legacy`). The `db` service is reachable at the hostname `db` or `database` on the `new` network, and at `db` or `mysql` on the `legacy` network. - version: 2 + version: '2' services: web: @@ -523,6 +554,38 @@ In the example below, three services are provided (`web`, `worker`, and `db`), a new: legacy: +#### ipv4_address, ipv6_address + +Specify a static IP address for containers for this service when joining the network. + +The corresponding network configuration in the [top-level networks section](#network-configuration-reference) must have an `ipam` block with subnet and gateway configurations covering each static address. If IPv6 addressing is desired, the `com.docker.network.enable_ipv6` driver option must be set to `true`. + +An example: + + version: '2' + + services: + app: + image: busybox + command: ifconfig + networks: + app_net: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + + networks: + app_net: + driver: bridge + driver_opts: + com.docker.network.enable_ipv6: "true" + ipam: + driver: default + config: + - subnet: 172.16.238.0/24 + gateway: 172.16.238.1 + - subnet: 2001:3984:3989::/64 + gateway: 2001:3984:3989::1 + ### pid pid: "host" @@ -628,7 +691,8 @@ information. ### volumes_from Mount all of the volumes from another service or container, optionally -specifying read-only access(``ro``) or read-write(``rw``). +specifying read-only access (``ro``) or read-write (``rw``). If no access level is specified, +then read-write will be used. volumes_from: - service_name @@ -645,7 +709,7 @@ specifying read-only access(``ro``) or read-write(``rw``). 
> - container_name > - container_name:rw -### cpu\_shares, cpu\_quota, cpuset, domainname, hostname, ipc, mac\_address, mem\_limit, memswap\_limit, privileged, read\_only, restart, stdin\_open, tty, user, working\_dir +### cpu\_shares, cpu\_quota, cpuset, domainname, hostname, ipc, mac\_address, mem\_limit, memswap\_limit, privileged, read\_only, restart, shm\_size, stdin\_open, tty, user, working\_dir Each of these is a single value, analogous to its [docker run](https://docs.docker.com/engine/reference/run/) counterpart. @@ -669,6 +733,7 @@ Each of these is a single value, analogous to its restart: always read_only: true + shm_size: 64M stdin_open: true tty: true @@ -679,7 +744,7 @@ While it is possible to declare volumes on the fly as part of the service declaration, this section allows you to create named volumes that can be reused across multiple services (without relying on `volumes_from`), and are easily retrieved and inspected using the docker command line or API. -See the [docker volume](/engine/reference/commandline/volume_create.md) +See the [docker volume](https://docs.docker.com/engine/reference/commandline/volume_create/) subcommand documentation for more information. ### driver @@ -699,7 +764,7 @@ documentation for more information. Optional. foo: "bar" baz: 1 -## external +### external If set to `true`, specifies that this volume has been created outside of Compose. `docker-compose up` will not attempt to create it, and will raise diff --git a/docs/django.md b/docs/django.md index e616d0e12..fb1fa2141 100644 --- a/docs/django.md +++ b/docs/django.md @@ -10,10 +10,9 @@ weight=4 -# Quickstart: Compose and Django +# Quickstart: Docker Compose and Django -This quick-start guide demonstrates how to use Compose to set up and run a -simple Django/PostgreSQL app. Before starting, you'll need to have +This quick-start guide demonstrates how to use Docker Compose to set up and run a simple Django/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md). ## Define the project components @@ -119,12 +118,23 @@ In this step, you create a Django started project by building the image from the -rwxr-xr-x 1 root root manage.py -rw-rw-r-- 1 user user requirements.txt - The files `django-admin` created are owned by root. This happens because - the container runs as the `root` user. + If you are running Docker on Linux, the files `django-admin` created are owned + by root. This happens because the container runs as the root user. Change the + ownership of the new files. -4. Change the ownership of the new files. + sudo chown -R $USER:$USER . - sudo chown -R $USER:$USER . + If you are running Docker on Mac or Windows, you should already have ownership + of all files, including those generated by `django-admin`. List the files just + to verify this. + + $ ls -l + total 32 + -rw-r--r-- 1 user staff 145 Feb 13 23:00 Dockerfile + drwxr-xr-x 6 user staff 204 Feb 13 23:07 composeexample + -rw-r--r-- 1 user staff 159 Feb 13 23:02 docker-compose.yml + -rwxr-xr-x 1 user staff 257 Feb 13 23:07 manage.py + -rw-r--r-- 1 user staff 16 Feb 13 23:01 requirements.txt ## Connect the database @@ -171,6 +181,8 @@ In this section, you set up the database connection for Django. Docker host. If you are using a Docker Machine VM, you can use the `docker-machine ip MACHINE_NAME` to get the IP address.
+ ![Django example](images/django-it-worked.png) + ## More Compose documentation - [User guide](index.md) diff --git a/docs/env-file.md b/docs/env-file.md new file mode 100644 index 000000000..a285a7908 --- /dev/null +++ b/docs/env-file.md @@ -0,0 +1,43 @@ + + + +# Environment file + +Compose supports declaring default environment variables in an environment +file named `.env` and placed in the same folder as your +[compose file](compose-file.md). + +Compose expects each line in an env file to be in `VAR=VAL` format. Lines +beginning with `#` (i.e. comments) are ignored, as are blank lines. + +> Note: Values present in the environment at runtime will always override +> those defined inside the `.env` file. Similarly, values passed via +> command-line arguments take precedence as well. + +Those environment variables will be used for +[variable substitution](compose-file.md#variable-substitution) in your Compose +file, but can also be used to define the following +[CLI variables](reference/envvars.md): + +- `COMPOSE_API_VERSION` +- `COMPOSE_FILE` +- `COMPOSE_HTTP_TIMEOUT` +- `COMPOSE_PROJECT_NAME` +- `DOCKER_CERT_PATH` +- `DOCKER_HOST` +- `DOCKER_TLS_VERIFY` + +## More Compose documentation + +- [User guide](index.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/extends.md b/docs/extends.md index bceb02578..6f457391f 100644 --- a/docs/extends.md +++ b/docs/extends.md @@ -290,34 +290,20 @@ replaces the old value. # result command: python otherapp.py -In the case of `build` and `image`, using one in the local service causes -Compose to discard the other, if it was defined in the original service. +> **Note:** In the case of `build` and `image`, when using +> [version 1 of the Compose file format](compose-file.md#version-1), using one +> option in the local service causes Compose to discard the other option if it +> was defined in the original service. +> +> For example, if the original service defines `image: webapp` and the +> local service defines `build: .` then the resulting service will have +> `build: .` and no `image` option. +> +> This is because `build` and `image` cannot be used together in a version 1 +> file. -Example of image replacing build: - - # original service - build: . - - # local service - image: redis - - # result - image: redis - - -Example of build replacing image: - - # original service - image: redis - - # local service - build: . - - # result - build: . - -For the **multi-value options** `ports`, `expose`, `external_links`, `dns` and -`dns_search`, Compose concatenates both sets of values: +For the **multi-value options** `ports`, `expose`, `external_links`, `dns`, +`dns_search`, and `tmpfs`, Compose concatenates both sets of values: # original service expose: diff --git a/docs/faq.md b/docs/faq.md index 73596c18b..45885255f 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -15,7 +15,13 @@ weight=90 If you don’t see your question here, feel free to drop by `#docker-compose` on freenode IRC and ask the community. -## Why do my services take 10 seconds to stop? + +## Can I control service startup order? + +Yes - see [Controlling startup order](startup-order.md). + + +## Why do my services take 10 seconds to recreate or stop? Compose stop attempts to stop a container by sending a `SIGTERM`. It then waits for a [default timeout of 10 seconds](./reference/stop.md). After the timeout, @@ -40,6 +46,12 @@ in your Dockerfile. 
* If you are able, modify the application that you're running to add an explicit signal handler for `SIGTERM`. +* Set the `stop_signal` to a signal which the application knows how to handle: + + web: + build: . + stop_signal: SIGINT + * If you can't modify the application, wrap the application in a lightweight init system (like [s6](http://skarnet.org/software/s6/)) or a signal proxy (like [dumb-init](https://github.com/Yelp/dumb-init) or @@ -84,30 +96,6 @@ specify the filename to use, for example: docker-compose -f docker-compose.json up ``` -## How do I get Compose to wait for my database to be ready before starting my application? - -Unfortunately, Compose won't do that for you but for a good reason. - -The problem of waiting for a database to be ready is really just a subset of a -much larger problem of distributed systems. In production, your database could -become unavailable or move hosts at any time. The application needs to be -resilient to these types of failures. - -To handle this, the application would attempt to re-establish a connection to -the database after a failure. If the application retries the connection, -it should eventually be able to connect to the database. - -To wait for the application to be in a good state, you can implement a -healthcheck. A healthcheck makes a request to the application and checks -the response for a success status code. If it is not successful it waits -for a short period of time, and tries again. After some timeout value, the check -stops trying and report a failure. - -If you need to run tests against your application, you can start by running a -healthcheck. Once the healthcheck gets a successful response, you can start -running your tests. - - ## Should I include my code with `COPY`/`ADD` or a volume? You can add your code to the image using `COPY` or `ADD` directive in a diff --git a/docs/gettingstarted.md b/docs/gettingstarted.md index 36577f075..60482bce5 100644 --- a/docs/gettingstarted.md +++ b/docs/gettingstarted.md @@ -12,7 +12,7 @@ weight=-85 # Getting Started -On this page you build a simple Python web application running on Compose. The +On this page you build a simple Python web application running on Docker Compose. The application uses the Flask framework and increments a value in Redis. While the sample uses Python, the concepts demonstrated here should be understandable even if you're not familiar with it. 
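The `.env` parsing rules in the new `docs/env-file.md` above (one `VAR=VAL` per line, `#` comments and blank lines ignored, shell values winning over file values) can be captured in a few lines of Python. This is a minimal sketch for illustration only; `load_env_defaults` is a hypothetical helper, not the code Compose ships:

    import os

    def load_env_defaults(path='.env'):
        # Hypothetical sketch of docs/env-file.md: each line is VAR=VAL;
        # '#' comments and blank lines are skipped.
        if not os.path.exists(path):
            return
        with open(path) as env_file:
            for line in env_file:
                line = line.strip()
                if not line or line.startswith('#') or '=' not in line:
                    continue
                key, _, value = line.partition('=')
                # setdefault: values already present in the shell
                # environment always override the .env file.
                os.environ.setdefault(key.strip(), value)

Using `setdefault` mirrors the documented precedence: values present in the environment at runtime always win over `.env` entries.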
diff --git a/docs/images/django-it-worked.png b/docs/images/django-it-worked.png new file mode 100644 index 000000000..75769754b Binary files /dev/null and b/docs/images/django-it-worked.png differ diff --git a/docs/images/rails-welcome.png b/docs/images/rails-welcome.png new file mode 100644 index 000000000..51512dbda Binary files /dev/null and b/docs/images/rails-welcome.png differ diff --git a/docs/images/wordpress-files.png b/docs/images/wordpress-files.png new file mode 100644 index 000000000..4762935ba Binary files /dev/null and b/docs/images/wordpress-files.png differ diff --git a/docs/images/wordpress-lang.png b/docs/images/wordpress-lang.png new file mode 100644 index 000000000..f0bd864ef Binary files /dev/null and b/docs/images/wordpress-lang.png differ diff --git a/docs/images/wordpress-welcome.png b/docs/images/wordpress-welcome.png new file mode 100644 index 000000000..c9ba20368 Binary files /dev/null and b/docs/images/wordpress-welcome.png differ diff --git a/docs/index.md b/docs/index.md index f5d84218f..f1b710794 100644 --- a/docs/index.md +++ b/docs/index.md @@ -23,6 +23,7 @@ Compose is a tool for defining and running multi-container Docker applications. - [Frequently asked questions](faq.md) - [Command line reference](./reference/index.md) - [Compose file reference](compose-file.md) +- [Environment file](env-file.md) To see a detailed list of changes for past and current releases of Docker Compose, please refer to the diff --git a/docs/install.md b/docs/install.md index a7a4539b2..e8fede82a 100644 --- a/docs/install.md +++ b/docs/install.md @@ -12,21 +12,21 @@ weight=-90 # Install Docker Compose -You can run Compose on OS X and 64-bit Linux. It is currently not supported on -the Windows operating system. To install Compose, you'll need to install Docker -first. +You can run Compose on OS X, Windows and 64-bit Linux. To install it, you'll need to install Docker first. To install Compose, do the following: -1. Install Docker Engine version 1.7.1 or greater: +1. Install Docker Engine: - * Mac OS X installation (Toolbox installation includes both Engine and Compose) + * Mac OS X installation - * Ubuntu installation + * Windows installation - * other system installations + * Ubuntu installation -2. Mac OS X users are done installing. Others should continue to the next step. + * other system installations + +2. The Docker Toolbox installation includes both Engine and Compose, so Mac and Windows users are done installing. Others should continue to the next step. 3. Go to the Compose repository release page on GitHub. @@ -39,7 +39,7 @@ which the release page specifies, in your terminal. The following is an example command illustrating the format: - curl -L https://github.com/docker/compose/releases/download/1.6.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose + curl -L https://github.com/docker/compose/releases/download/1.7.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose If you have problems installing with `curl`, see [Alternative Install Options](#alternative-install-options). @@ -54,7 +54,7 @@ which the release page specifies, in your terminal. 7. Test the installation. $ docker-compose --version - docker-compose version: 1.6.2 + docker-compose version: 1.7.0 ## Alternative install options @@ -77,7 +77,7 @@ to get started. Compose can also be run inside a container, from a small bash script wrapper. 
To install compose as a container run: - $ curl -L https://github.com/docker/compose/releases/download/1.6.2/run.sh > /usr/local/bin/docker-compose + $ curl -L https://github.com/docker/compose/releases/download/1.7.0/run.sh > /usr/local/bin/docker-compose $ chmod +x /usr/local/bin/docker-compose ## Master builds diff --git a/docs/networking.md b/docs/networking.md index 1fd6c1161..9739a0884 100644 --- a/docs/networking.md +++ b/docs/networking.md @@ -15,7 +15,7 @@ weight=21 > **Note:** This document only applies if you're using [version 2 of the Compose file format](compose-file.md#versioning). Networking features are not supported for version 1 (legacy) Compose files. By default Compose sets up a single -[network](/engine/reference/commandline/network_create.md) for your app. Each +[network](https://docs.docker.com/engine/reference/commandline/network_create/) for your app. Each container for a service joins the default network and is both *reachable* by other containers on that network, and *discoverable* by them at a hostname identical to the container name. @@ -78,11 +78,11 @@ See the [links reference](compose-file.md#links) for more information. When [deploying a Compose application to a Swarm cluster](swarm.md), you can make use of the built-in `overlay` driver to enable multi-host communication between containers with no changes to your Compose file or application code. -Consult the [Getting started with multi-host networking](/engine/userguide/networking/get-started-overlay.md) to see how to set up a Swarm cluster. The cluster will use the `overlay` driver by default, but you can specify it explicitly if you prefer - see below for how to do this. +Consult the [Getting started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to set up a Swarm cluster. The cluster will use the `overlay` driver by default, but you can specify it explicitly if you prefer - see below for how to do this. ## Specifying custom networks -Instead of just using the default app network, you can specify your own networks with the top-level `networks` key. This lets you create more complex topologies and specify [custom network drivers](/engine/extend/plugins_network.md) and options. You can also use it to connect services to externally-created networks which aren't managed by Compose. +Instead of just using the default app network, you can specify your own networks with the top-level `networks` key. This lets you create more complex topologies and specify [custom network drivers](https://docs.docker.com/engine/extend/plugins_network/) and options. You can also use it to connect services to externally-created networks which aren't managed by Compose. Each service can specify what networks to connect to with the *service-level* `networks` key, which is a list of names referencing entries under the *top-level* `networks` key. @@ -116,6 +116,8 @@ Here's an example Compose file defining two custom networks. The `proxy` service foo: "1" bar: "2" +Networks can be configured with static IP addresses by setting the [ipv4_address and/or ipv6_address](compose-file.md#ipv4-address-ipv6-address) for each attached network. + For full details of the network configuration options available, see the following references: - [Top-level `networks` key](compose-file.md#network-configuration-reference) diff --git a/docs/overview.md b/docs/overview.md index bb3c5d713..03ade3566 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -24,11 +24,14 @@ CI workflows. 
You can learn more about each case in Using Compose is basically a three-step process. -1. Define your app's environment with a `Dockerfile` so it can be -reproduced anywhere. -2. Define the services that make up your app in `docker-compose.yml` so -they can be run together in an isolated environment. -3. Lastly, run `docker-compose up` and Compose will start and run your entire app. +1. Define your app's environment with a `Dockerfile` so it can be reproduced +anywhere. + +2. Define the services that make up your app in `docker-compose.yml` +so they can be run together in an isolated environment. + +3. Lastly, run +`docker-compose up` and Compose will start and run your entire app. A `docker-compose.yml` looks like this: @@ -37,12 +40,12 @@ A `docker-compose.yml` looks like this: web: build: . ports: - - "5000:5000" + - "5000:5000" volumes: - - .:/code - - logvolume01:/var/log + - .:/code + - logvolume01:/var/log links: - - redis + - redis redis: image: redis volumes: @@ -80,14 +83,12 @@ The features of Compose that make it effective are: ### Multiple isolated environments on a single host -Compose uses a project name to isolate environments from each other. You can use -this project name to: +Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts: -* on a dev host, to create multiple copies of a single environment (ex: you want - to run a stable copy for each feature branch of a project) +* on a dev host, to create multiple copies of a single environment (e.g., you want to run a stable copy for each feature branch of a project) * on a CI server, to keep builds from interfering with each other, you can set the project name to a unique build number -* on a shared host or dev host, to prevent different projects which may use the +* on a shared host or dev host, to prevent different projects, which may use the same service names, from interfering with each other The default project name is the basename of the project directory. You can set @@ -148,9 +149,7 @@ started guide" to a single machine readable Compose file and a few commands. An important part of any Continuous Deployment or Continuous Integration process is the automated test suite. Automated end-to-end testing requires an environment in which to run tests. Compose provides a convenient way to create -and destroy isolated testing environments for your test suite. By defining the full -environment in a [Compose file](compose-file.md) you can create and destroy these -environments in just a few commands: +and destroy isolated testing environments for your test suite. By defining the full environment in a [Compose file](compose-file.md) you can create and destroy these environments in just a few commands: $ docker-compose up -d $ ./run_tests @@ -159,9 +158,7 @@ environments in just a few commands: ### Single host deployments Compose has traditionally been focused on development and testing workflows, -but with each release we're making progress on more production-oriented features. -You can use Compose to deploy to a remote Docker Engine. The Docker Engine may -be a single instance provisioned with +but with each release we're making progress on more production-oriented features. You can use Compose to deploy to a remote Docker Engine. The Docker Engine may be a single instance provisioned with [Docker Machine](https://docs.docker.com/machine/) or an entire [Docker Swarm](https://docs.docker.com/swarm/) cluster. 
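As a rough illustration of the project-name isolation described in `docs/overview.md` above, the naming scheme reduces to a few lines of Python. This is a simplified stand-in, assuming the `myapp_db_1`-style names from the docs and a `run` marker for one-off containers; the real `build_container_name` called in `compose/service.py` may differ in details:

    import os

    def build_container_name(project, service, number, one_off=False):
        # Assumed scheme: <project>_<service>_<number>, e.g. myapp_db_1;
        # one-off containers get a 'run' marker, e.g. myapp_db_run_1.
        bits = [project, service, 'run'] if one_off else [project, service]
        return '_'.join(bits + [str(number)])

    # The default project name is the basename of the project directory.
    project = os.path.basename(os.getcwd())
    print(build_container_name(project, 'db', 1))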
diff --git a/docs/production.md b/docs/production.md index 40ce1e661..9acf64e56 100644 --- a/docs/production.md +++ b/docs/production.md @@ -12,13 +12,18 @@ weight=22 ## Using Compose in production -> Compose is still primarily aimed at development and testing environments. -> Compose may be used for smaller production deployments, but is probably -> not yet suitable for larger deployments. +When you define your app with Compose in development, you can use this +definition to run your application in different environments such as CI, +staging, and production. -When deploying to production, you'll almost certainly want to make changes to -your app configuration that are more appropriate to a live environment. These -changes may include: +The easiest way to deploy an application is to run it on a single server, +similar to how you would run your development environment. If you want to scale +up your application, you can run Compose apps on a Swarm cluster. + +### Modify your Compose file for production + +You'll almost certainly want to make changes to your app configuration that are +more appropriate to a live environment. These changes may include: - Removing any volume bindings for application code, so that code stays inside the container and can't be changed from outside @@ -73,8 +78,8 @@ commands will work with no further configuration. system, exposes the same API as a single Docker host, which means you can use Compose against a Swarm instance and run your apps across multiple hosts. -Compose/Swarm integration is still in the experimental stage, but if you'd like -to explore and experiment, check out the [integration guide](swarm.md). +Read more about the Compose/Swarm integration in the +[integration guide](swarm.md). ## Compose documentation diff --git a/docs/rails.md b/docs/rails.md index 8b7b4fd91..a8fc383e7 100644 --- a/docs/rails.md +++ b/docs/rails.md @@ -9,9 +9,9 @@ weight=5 +++ -## Quickstart: Compose and Rails +## Quickstart: Docker Compose and Rails -This Quickstart guide will show you how to use Compose to set up and run a Rails/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md). +This Quickstart guide will show you how to use Docker Compose to set up and run a Rails/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md). ### Define the project @@ -30,7 +30,9 @@ Dockerfile consists of: RUN bundle install ADD . /myapp -That'll put your application code inside an image that will build a container with Ruby, Bundler and all your dependencies inside it. For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/). +That'll put your application code inside an image that will build a container +with Ruby, Bundler and all your dependencies inside it. For more information on +how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/). Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`. @@ -41,7 +43,11 @@ You'll need an empty `Gemfile.lock` in order to build our `Dockerfile`. $ touch Gemfile.lock -Finally, `docker-compose.yml` is where the magic happens. 
This file describes the services that comprise your app (a database and a web app), how to get each one's Docker image (the database just runs on a pre-made PostgreSQL image, and the web app is built from the current directory), and the configuration needed to link them together and expose the web app's port. +Finally, `docker-compose.yml` is where the magic happens. This file describes +the services that comprise your app (a database and a web app), how to get each +one's Docker image (the database just runs on a pre-made PostgreSQL image, and +the web app is built from the current directory), and the configuration needed +to link them together and expose the web app's port. version: '2' services: @@ -64,22 +70,38 @@ using `docker-compose run`: $ docker-compose run web rails new . --force --database=postgresql --skip-bundle -First, Compose will build the image for the `web` service using the -`Dockerfile`. Then it'll run `rails new` inside a new container, using that -image. Once it's done, you should have generated a fresh app: +First, Compose will build the image for the `web` service using the `Dockerfile`. Then it'll run `rails new` inside a new container, using that image. Once it's done, you should have generated a fresh app: - $ ls - Dockerfile app docker-compose.yml tmp - Gemfile bin lib vendor - Gemfile.lock config log - README.rdoc config.ru public - Rakefile db test + $ ls -l + total 56 + -rw-r--r-- 1 user staff 215 Feb 13 23:33 Dockerfile + -rw-r--r-- 1 user staff 1480 Feb 13 23:43 Gemfile + -rw-r--r-- 1 user staff 2535 Feb 13 23:43 Gemfile.lock + -rw-r--r-- 1 root root 478 Feb 13 23:43 README.rdoc + -rw-r--r-- 1 root root 249 Feb 13 23:43 Rakefile + drwxr-xr-x 8 root root 272 Feb 13 23:43 app + drwxr-xr-x 6 root root 204 Feb 13 23:43 bin + drwxr-xr-x 11 root root 374 Feb 13 23:43 config + -rw-r--r-- 1 root root 153 Feb 13 23:43 config.ru + drwxr-xr-x 3 root root 102 Feb 13 23:43 db + -rw-r--r-- 1 user staff 161 Feb 13 23:35 docker-compose.yml + drwxr-xr-x 4 root root 136 Feb 13 23:43 lib + drwxr-xr-x 3 root root 102 Feb 13 23:43 log + drwxr-xr-x 7 root root 238 Feb 13 23:43 public + drwxr-xr-x 9 root root 306 Feb 13 23:43 test + drwxr-xr-x 3 root root 102 Feb 13 23:43 tmp + drwxr-xr-x 3 root root 102 Feb 13 23:43 vendor -The files `rails new` created are owned by root. This happens because the -container runs as the `root` user. Change the ownership of the new files. +If you are running Docker on Linux, the files `rails new` created are owned by +root. This happens because the container runs as the root user. Change the +ownership of the new files. - sudo chown -R $USER:$USER . + sudo chown -R $USER:$USER . + +If you are running Docker on Mac or Windows, you should already have ownership +of all files, including those generated by `rails new`. List the files just to +verify this. Uncomment the line in your new `Gemfile` which loads `therubyracer`, so you've got a Javascript runtime: @@ -132,6 +154,14 @@ Finally, you need to create the database. In another terminal, run: That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` returns the Docker host IP address. +![Rails example](images/rails-welcome.png) + +>**Note**: If you stop the example application and attempt to restart it, you might get the +following error: `web_1 | A server is already running.
Check +/myapp/tmp/pids/server.pid.` One way to resolve this is to delete the file +`tmp/pids/server.pid`, and then re-start the application with `docker-compose +up`. + ## More Compose documentation diff --git a/docs/reference/create.md b/docs/reference/create.md index a785e2c70..5065e8beb 100644 --- a/docs/reference/create.md +++ b/docs/reference/create.md @@ -12,14 +12,15 @@ parent = "smn_compose_cli" # create ``` +Creates containers for a service. + Usage: create [options] [SERVICE...] Options: ---force-recreate Recreate containers even if their configuration and - image haven't changed. Incompatible with --no-recreate. ---no-recreate If containers already exist, don't recreate them. - Incompatible with --force-recreate. ---no-build Don't build an image, even if it's missing + --force-recreate Recreate containers even if their configuration and + image haven't changed. Incompatible with --no-recreate. + --no-recreate If containers already exist, don't recreate them. + Incompatible with --force-recreate. + --no-build Don't build an image, even if it's missing. + --build Build images before creating containers. ``` - -Creates containers for a service. diff --git a/docs/reference/down.md b/docs/reference/down.md index 2495abeac..e8b1db597 100644 --- a/docs/reference/down.md +++ b/docs/reference/down.md @@ -18,9 +18,11 @@ created by `up`. Only containers and networks are removed by default. Usage: down [options] Options: - --rmi type Remove images, type may be one of: 'all' to remove - all images, or 'local' to remove only images that - don't have an custom name set by the `image` field - -v, --volumes Remove data volumes + --rmi type Remove images, type may be one of: 'all' to remove + all images, or 'local' to remove only images that + don't have a custom name set by the `image` field + -v, --volumes Remove data volumes + --remove-orphans Remove containers for services not defined in the + Compose file ``` diff --git a/docs/reference/envvars.md b/docs/reference/envvars.md index 6360fe54a..6f7fb7919 100644 --- a/docs/reference/envvars.md +++ b/docs/reference/envvars.md @@ -17,6 +17,9 @@ Several environment variables are available for you to configure the Docker Comp Variables starting with `DOCKER_` are the same as those used to configure the Docker command-line client. If you're using `docker-machine`, then the `eval "$(docker-machine env my-docker-vm)"` command should set them to their correct values. (In this example, `my-docker-vm` is the name of a machine you created.) +> Note: Some of these variables can also be provided using an +> [environment file](../env-file.md) + ## COMPOSE\_PROJECT\_NAME Sets the project name. This value is prepended along with the service name to the container container on start up. For example, if you project name is `myapp` and it includes two services `db` and `web` then compose starts containers named `myapp_db_1` and `myapp_web_1` respectively. @@ -27,10 +30,15 @@ defaults to the `basename` of the project directory. See also the `-p` ## COMPOSE\_FILE -Specify the file containing the compose configuration. If not provided, -Compose looks for a file named `docker-compose.yml` in the current directory -and then each parent directory in succession until a file by that name is -found. See also the `-f` [command-line option](overview.md). +Specify the path to a Compose file. If not provided, Compose looks for a file named +`docker-compose.yml` in the current directory and then each parent directory in +succession until a file by that name is found.
+ +This variable supports multiple compose files separated by a path separator (on +Linux and OSX the path separator is `:`, on Windows it is `;`). For example: +`COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml` + +See also the `-f` [command-line option](overview.md). ## COMPOSE\_API\_VERSION @@ -76,3 +84,4 @@ it failed. Defaults to 60 seconds. - [User guide](../index.md) - [Installing Compose](../install.md) - [Compose file reference](../compose-file.md) +- [Environment file](../env-file.md) diff --git a/docs/reference/exec.md b/docs/reference/exec.md new file mode 100644 index 000000000..6c0eeb04d --- /dev/null +++ b/docs/reference/exec.md @@ -0,0 +1,29 @@ + + +# exec + +``` +Usage: exec [options] SERVICE COMMAND [ARGS...] + +Options: +-d Detached mode: Run command in the background. +--privileged Give extended privileges to the process. +--user USER Run the command as this user. +-T Disable pseudo-tty allocation. By default `docker-compose exec` + allocates a TTY. +--index=index index of the container if there are multiple + instances of a service [default: 1] +``` + +This is the equivalent of `docker exec`. With this subcommand you can run arbitrary +commands in your services. By default, commands allocate a TTY, so you can +run e.g. `docker-compose exec web sh` to get an interactive prompt. diff --git a/docs/reference/logs.md b/docs/reference/logs.md index 5b241ea70..745d24f7f 100644 --- a/docs/reference/logs.md +++ b/docs/reference/logs.md @@ -15,7 +15,11 @@ parent = "smn_compose_cli" Usage: logs [options] [SERVICE...] Options: ---no-color Produce monochrome output. +--no-color Produce monochrome output. +-f, --follow Follow log output +-t, --timestamps Show timestamps +--tail Number of lines to show from the end of the logs + for each container. ``` Displays log output from services. diff --git a/docs/reference/overview.md b/docs/reference/overview.md index 09f2817a6..d59fa5657 100644 --- a/docs/reference/overview.md +++ b/docs/reference/overview.md @@ -25,10 +25,20 @@ Usage: docker-compose -h|--help Options: - -f, --file FILE Specify an alternate compose file (default: docker-compose.yml) - -p, --project-name NAME Specify an alternate project name (default: directory name) - --verbose Show more output - -v, --version Print version and exit + -f, --file FILE Specify an alternate compose file (default: docker-compose.yml) + -p, --project-name NAME Specify an alternate project name (default: directory name) + --verbose Show more output + -v, --version Print version and exit + -H, --host HOST Daemon socket to connect to + + --tls Use TLS; implied by --tlsverify + --tlscacert CA_PATH Trust certs signed only by this CA + --tlscert CLIENT_CERT_PATH Path to TLS certificate file + --tlskey TLS_KEY_PATH Path to TLS key file + --tlsverify Use TLS and verify the remote + --skip-hostname-check Don't check the daemon's hostname against the name specified + in the client certificate (for example if your docker host + is an IP address) Commands: build Build or rebuild services diff --git a/docs/reference/rm.md b/docs/reference/rm.md index f84792243..97698b58b 100644 --- a/docs/reference/rm.md +++ b/docs/reference/rm.md @@ -17,6 +17,7 @@ Usage: rm [options] [SERVICE...] Options: -f, --force Don't ask to confirm removal -v Remove volumes associated with containers +-a, --all Also remove one-off containers ``` Removes stopped service containers.
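The multi-file `COMPOSE_FILE` behaviour documented in `envvars.md` above maps directly onto Python's `os.pathsep`, which is `:` on Linux/OS X and `;` on Windows. A minimal sketch of the split, with `compose_files_from_env` as a hypothetical helper rather than the code Compose ships:

    import os

    def compose_files_from_env(environ=os.environ):
        # e.g. COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml
        value = environ.get('COMPOSE_FILE')
        if not value:
            # Simplification: Compose actually also searches parent
            # directories for a default docker-compose.yml.
            return ['docker-compose.yml']
        return value.split(os.pathsep)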
diff --git a/docs/reference/run.md b/docs/reference/run.md index 21890c60a..863544246 100644 --- a/docs/reference/run.md +++ b/docs/reference/run.md @@ -26,6 +26,7 @@ Options: -p, --publish=[] Publish a container's port(s) to the host --service-ports Run command with the service's ports enabled and mapped to the host. -T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY. +-w, --workdir="" Working directory inside the container ``` Runs a one-time command against a service. For example, the following command starts the `web` service and runs `bash` as its command. diff --git a/docs/reference/up.md b/docs/reference/up.md index a02358ec7..3951f8792 100644 --- a/docs/reference/up.md +++ b/docs/reference/up.md @@ -15,22 +15,26 @@ parent = "smn_compose_cli" Usage: up [options] [SERVICE...] Options: --d Detached mode: Run containers in the background, - print new container names. - Incompatible with --abort-on-container-exit. ---no-color Produce monochrome output. ---no-deps Don't start linked services. ---force-recreate Recreate containers even if their configuration - and image haven't changed. - Incompatible with --no-recreate. ---no-recreate If containers already exist, don't recreate them. - Incompatible with --force-recreate. ---no-build Don't build an image, even if it's missing ---abort-on-container-exit Stops all containers if any container was stopped. - Incompatible with -d. --t, --timeout TIMEOUT Use this timeout in seconds for container shutdown - when attached or when containers are already - running. (default: 10) + -d Detached mode: Run containers in the background, + print new container names. + Incompatible with --abort-on-container-exit. + --no-color Produce monochrome output. + --no-deps Don't start linked services. + --force-recreate Recreate containers even if their configuration + and image haven't changed. + Incompatible with --no-recreate. + --no-recreate If containers already exist, don't recreate them. + Incompatible with --force-recreate. + --no-build Don't build an image, even if it's missing. + --build Build images before starting containers. + --abort-on-container-exit Stops all containers if any container was stopped. + Incompatible with -d. + -t, --timeout TIMEOUT Use this timeout in seconds for container shutdown + when attached or when containers are already + running. (default: 10) + --remove-orphans Remove containers for services not defined in + the Compose file + ``` Builds, (re)creates, starts, and attaches to containers for a service. diff --git a/docs/startup-order.md b/docs/startup-order.md new file mode 100644 index 000000000..c67e18295 --- /dev/null +++ b/docs/startup-order.md @@ -0,0 +1,88 @@ + + +# Controlling startup order in Compose + +You can control the order of service startup with the +[depends_on](compose-file.md#depends-on) option. Compose always starts +containers in dependency order, where dependencies are determined by +`depends_on`, `links`, `volumes_from` and `network_mode: "service:..."`. + +However, Compose will not wait until a container is "ready" (whatever that means +for your particular application) - only until it's running. There's a good +reason for this. + +The problem of waiting for a database (for example) to be ready is really just +a subset of a much larger problem of distributed systems. In production, your +database could become unavailable or move hosts at any time. Your application +needs to be resilient to these types of failures. 
+ +To handle this, your application should attempt to re-establish a connection to +the database after a failure. If the application retries the connection, +it should eventually be able to connect to the database. + +The best solution is to perform this check in your application code, both at +startup and whenever a connection is lost for any reason. However, if you don't +need this level of resilience, you can work around the problem with a wrapper +script: + +- Use a tool such as [wait-for-it](https://github.com/vishnubob/wait-for-it) + or [dockerize](https://github.com/jwilder/dockerize). These are small + wrapper scripts which you can include in your application's image and will + poll a given host and port until it's accepting TCP connections. + + Supposing your application's image has a `CMD` set in its Dockerfile, you + can wrap it by setting the entrypoint in `docker-compose.yml`: + + version: "2" + services: + web: + build: . + ports: + - "80:8000" + depends_on: + - "db" + entrypoint: ./wait-for-it.sh db:5432 + db: + image: postgres + +- Write your own wrapper script to perform a more application-specific health + check. For example, you might want to wait until Postgres is definitely + ready to accept commands: + + #!/bin/bash + + set -e + + host="$1" + shift + cmd="$@" + + until psql -h "$host" -U "postgres" -c '\l'; do + >&2 echo "Postgres is unavailable - sleeping" + sleep 1 + done + + >&2 echo "Postgres is up - executing command" + exec $cmd + + You can use this as a wrapper script as in the previous example, by setting + `entrypoint: ./wait-for-postgres.sh db`. + + +## Compose documentation + +- [Installing Compose](install.md) +- [Get started with Django](django.md) +- [Get started with Rails](rails.md) +- [Get started with WordPress](wordpress.md) +- [Command line reference](./reference/index.md) +- [Compose file reference](compose-file.md) diff --git a/docs/swarm.md b/docs/swarm.md index 2b609efaa..ece721939 100644 --- a/docs/swarm.md +++ b/docs/swarm.md @@ -26,14 +26,11 @@ format](compose-file.md#versioning) you are using: - subject to the [limitations](#limitations) described below, - - as long as the Swarm cluster is configured to use the [overlay - driver](/engine/userguide/networking/dockernetworks.md#an-overlay-network), + - as long as the Swarm cluster is configured to use the [overlay driver](https://docs.docker.com/engine/userguide/networking/dockernetworks/#an-overlay-network), or a custom driver which supports multi-host networking. -Read the [Getting started with multi-host -networking](/engine/userguide/networking/get-started-overlay.md) to see how to -set up a Swarm cluster with [Docker Machine](/machine/overview) and the overlay driver. -Once you've got it running, deploying your app to it should be as simple as: +Read [Get started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to +set up a Swarm cluster with [Docker Machine](/machine/overview) and the overlay driver. Once you've got it running, deploying your app to it should be as simple as: $ eval "$(docker-machine env --swarm )" $ docker-compose up diff --git a/docs/wordpress.md b/docs/wordpress.md index 62aec2518..62f50c249 100644 --- a/docs/wordpress.md +++ b/docs/wordpress.md @@ -10,88 +10,133 @@ weight=6 -# Quickstart: Compose and WordPress +# Quickstart: Docker Compose and WordPress -You can use Compose to easily run WordPress in an isolated environment built -with Docker containers. 
+You can use Docker Compose to easily run WordPress in an isolated environment built
+with Docker containers. This quick-start guide demonstrates how to use Compose to set up and run WordPress. Before starting, you'll need to have
+[Compose installed](install.md).
 
 ## Define the project
 
-First, [Install Compose](install.md) and then download WordPress into the
-current directory:
+1. Create an empty project directory.
 
-    $ curl https://wordpress.org/latest.tar.gz | tar -xvzf -
+   You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
 
-This will create a directory called `wordpress`. If you wish, you can rename it
-to the name of your project.
+   This project directory will contain a `Dockerfile`, a `docker-compose.yml` file, along with a downloaded `wordpress` directory and a custom `wp-config.php`, all of which you will create in the following steps.
 
-Next, inside that directory, create a `Dockerfile`, a file that defines what
-environment your app is going to run in. For more information on how to write
-Dockerfiles, see the
-[Docker user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the
-[Dockerfile reference](https://docs.docker.com/engine/reference/builder/). In
-this case, your Dockerfile should be:
+2. Change into your project directory.
 
-    FROM orchardup/php5
-    ADD . /code
+   For example, if you named your directory `my_wordpress`:
 
-This tells Docker how to build an image defining a container that contains PHP
-and WordPress.
+        $ cd my_wordpress/
 
-Next you'll create a `docker-compose.yml` file that will start your web service
-and a separate MySQL instance:
+3. Create a `Dockerfile`, a file that defines the environment in which your application will run.
 
-    version: '2'
-    services:
-      web:
-        build: .
-        command: php -S 0.0.0.0:8000 -t /code
-        ports:
-          - "8000:8000"
-        depends_on:
-          - db
-        volumes:
-          - .:/code
-      db:
-        image: orchardup/mysql
-        environment:
-          MYSQL_DATABASE: wordpress
+   For more information on how to write Dockerfiles, see the [Docker Engine user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/).
 
-A supporting file is needed to get this working. `wp-config.php` is
-the standard WordPress config file with a single change to point the database
-configuration at the `db` container:
+   In this case, your Dockerfile should include these two lines:
 
-
+
+7. Verify the contents and structure of your project directory.
+
+
+   ![WordPress files](images/wordpress-files.png)
 
 ### Build the project
 
-With those four files in place, run `docker-compose up` inside your WordPress
-directory and it'll pull and build the needed images, and then start the web and
-database containers. If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` gives you the machine address and you can open `http://MACHINE_VM_IP:8000` in a browser.
+With those four new files in place, run `docker-compose up` from your project directory. This will pull and build the needed images, and then start the web and database containers.
+
+If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` gives you the machine address and you can open `http://MACHINE_VM_IP:8000` in a browser.
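If you would rather script that check than open a browser, a small probe along these lines works too; this is a hypothetical sketch, and the address shown is just a typical Docker Machine default that you should replace with your own:

```python
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

# Replace with the address printed by `docker-machine ip MACHINE_VM`,
# or use localhost when talking to a local Docker daemon.
response = urlopen('http://192.168.99.100:8000', timeout=10)
print(response.getcode())  # expect 200 once WordPress is serving
```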
+ +At this point, WordPress should be running on port `8000` of your Docker Host, and you can complete the "famous five-minute installation" as a WordPress administrator. + +![Choose language for WordPress install](images/wordpress-lang.png) + +![WordPress Welcome](images/wordpress-welcome.png) + ## More Compose documentation diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md index 040a2602b..930af15a8 100644 --- a/project/RELEASE-PROCESS.md +++ b/project/RELEASE-PROCESS.md @@ -55,10 +55,10 @@ Check out the bump branch and run the `build-binaries` script When prompted build the non-linux binaries and test them. -1. Build the Mac binary in a Mountain Lion VM: +1. Download the osx binary from Bintray. Make sure that the latest build has + finished, otherwise you'll be downloading an old binary. - script/prepare-osx - script/build-osx + https://dl.bintray.com/docker-compose/$BRANCH_NAME/ 2. Download the windows binary from AppVeyor @@ -88,7 +88,7 @@ When prompted build the non-linux binaries and test them. ...release notes go here... -5. Attach the binaries and `script/run.sh` +5. Attach the binaries and `script/run/run.sh` 6. Add "Thanks" with a list of contributors. The contributor list can be generated by running `./script/release/contributors`. diff --git a/requirements.txt b/requirements.txt index e25386d24..b9b0f4036 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ PyYAML==3.11 cached-property==1.2.0 -docker-py==1.7.2 +docker-py==1.8.0 dockerpty==0.4.1 docopt==0.6.1 enum34==1.0.4 diff --git a/script/build-image b/script/build/image similarity index 91% rename from script/build-image rename to script/build/image index 897335054..bdd98f03e 100755 --- a/script/build-image +++ b/script/build/image @@ -10,7 +10,7 @@ fi TAG=$1 VERSION="$(python setup.py --version)" -./script/write-git-sha +./script/build/write-git-sha python setup.py sdist cp dist/docker-compose-$VERSION.tar.gz dist/docker-compose-release.tar.gz docker build -t docker/compose:$TAG -f Dockerfile.run . diff --git a/script/build-linux b/script/build/linux similarity index 76% rename from script/build-linux rename to script/build/linux index 47fb45e17..1a4cd4d9b 100755 --- a/script/build-linux +++ b/script/build/linux @@ -7,7 +7,7 @@ set -ex TAG="docker-compose" docker build -t "$TAG" . | tail -n 200 docker run \ - --rm --entrypoint="script/build-linux-inner" \ + --rm --entrypoint="script/build/linux-entrypoint" \ -v $(pwd)/dist:/code/dist \ -v $(pwd)/.git:/code/.git \ "$TAG" diff --git a/script/build-linux-inner b/script/build/linux-entrypoint similarity index 90% rename from script/build-linux-inner rename to script/build/linux-entrypoint index 9bf7c95d9..bf515060a 100755 --- a/script/build-linux-inner +++ b/script/build/linux-entrypoint @@ -9,7 +9,7 @@ mkdir -p `pwd`/dist chmod 777 `pwd`/dist $VENV/bin/pip install -q -r requirements-build.txt -./script/write-git-sha +./script/build/write-git-sha su -c "$VENV/bin/pyinstaller docker-compose.spec" user mv dist/docker-compose $TARGET $TARGET version diff --git a/script/build-osx b/script/build/osx similarity index 92% rename from script/build-osx rename to script/build/osx index 168fd4309..3de345762 100755 --- a/script/build-osx +++ b/script/build/osx @@ -9,7 +9,7 @@ virtualenv -p /usr/local/bin/python venv venv/bin/pip install -r requirements.txt venv/bin/pip install -r requirements-build.txt venv/bin/pip install --no-deps . 
-./script/write-git-sha
+./script/build/write-git-sha
 venv/bin/pyinstaller docker-compose.spec
 mv dist/docker-compose dist/docker-compose-Darwin-x86_64
 dist/docker-compose-Darwin-x86_64 version
diff --git a/script/build-windows.ps1 b/script/build/windows.ps1
similarity index 97%
rename from script/build-windows.ps1
rename to script/build/windows.ps1
index 4a2bc1f77..db643274c 100644
--- a/script/build-windows.ps1
+++ b/script/build/windows.ps1
@@ -26,7 +26,7 @@
 #
 # 6. Build the binary:
 #
-#    .\script\build-windows.ps1
+#    .\script\build\windows.ps1
 
 $ErrorActionPreference = "Stop"
diff --git a/script/write-git-sha b/script/build/write-git-sha
similarity index 100%
rename from script/write-git-sha
rename to script/build/write-git-sha
diff --git a/script/ci b/script/ci
index f30265c02..7b3489a1b 100755
--- a/script/ci
+++ b/script/ci
@@ -1,21 +1,8 @@
 #!/bin/bash
-# This should be run inside a container built from the Dockerfile
-# at the root of the repo:
 #
-# $ TAG="docker-compose:$(git rev-parse --short HEAD)"
-# $ docker build -t "$TAG" .
-# $ docker run --rm --volume="/var/run/docker.sock:/var/run/docker.sock" --volume="$(pwd)/.git:/code/.git" -e "TAG=$TAG" --entrypoint="script/ci" "$TAG"
-
-set -ex
-
-docker version
-
-export DOCKER_VERSIONS=all
-STORAGE_DRIVER=${STORAGE_DRIVER:-overlay}
-export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER"
-
-GIT_VOLUME="--volumes-from=$(hostname)"
-. script/test-versions
-
->&2 echo "Building Linux binary"
-. script/build-linux-inner
+# Backwards compatibility for Jenkins
+#
+# TODO: remove this script after all current PRs and Jenkins are updated with
+# the new script/test/ci change
+set -e
+exec script/test/ci
diff --git a/script/dev b/script/dev
deleted file mode 100755
index 80b3d0131..000000000
--- a/script/dev
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-# This is a script for running Compose inside a Docker container. It's handy for
-# development.
-#
-# $ ln -s `pwd`/script/dev /usr/local/bin/docker-compose
-# $ cd /a/compose/project
-# $ docker-compose up
-#
-
-set -e
-
-# Follow symbolic links
-if [ -h "$0" ]; then
-    DIR=$(readlink "$0")
-else
-    DIR=$0
-fi
-DIR="$(dirname "$DIR")"/..
-
-docker build -t docker-compose $DIR
-exec docker run -i -t -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`:`pwd` -w `pwd` docker-compose $@
diff --git a/script/release/build-binaries b/script/release/build-binaries
index 083f8eb58..9d4a606e2 100755
--- a/script/release/build-binaries
+++ b/script/release/build-binaries
@@ -22,16 +22,16 @@ REPO=docker/compose
 
 # Build the binaries
 script/clean
-script/build-linux
-# TODO: build osx binary
-# script/prepare-osx
-# script/build-osx
-# TODO: build or fetch the windows binary
-echo "You need to build the osx/windows binaries, that step is not automated yet."
+script/build/linux
 
 echo "Building the container distribution"
-script/build-image $VERSION
+script/build/image $VERSION
 
 echo "Create a github release"
 # TODO: script more of this https://developer.github.com/v3/repos/releases/
 browser https://github.com/$REPO/releases/new
+
+echo "Don't forget to download the osx and windows binaries from appveyor/bintray!"
+echo "https://dl.bintray.com/docker-compose/$BRANCH/" +echo "https://ci.appveyor.com/project/docker/compose" +echo diff --git a/script/release/make-branch b/script/release/make-branch index 46ba6bbca..7ccf3f055 100755 --- a/script/release/make-branch +++ b/script/release/make-branch @@ -65,10 +65,10 @@ git config "branch.${BRANCH}.release" $VERSION editor=${EDITOR:-vim} -echo "Update versions in docs/install.md, compose/__init__.py, script/run.sh" +echo "Update versions in docs/install.md, compose/__init__.py, script/run/run.sh" $editor docs/install.md $editor compose/__init__.py -$editor script/run.sh +$editor script/run/run.sh echo "Write release notes in CHANGELOG.md" @@ -82,20 +82,6 @@ $SHELL || true git commit -a -m "Bump $VERSION" --signoff --no-verify -echo "Push branch to user remote" -GITHUB_USER=$USER -USER_REMOTE="$(find_remote $GITHUB_USER/compose)" -if [ -z "$USER_REMOTE" ]; then - echo "$GITHUB_USER/compose not found" - read -r -p "Enter the name of your GitHub fork (username/repo): " GITHUB_REPO - # assumes there is already a user remote somewhere - USER_REMOTE=$(find_remote $GITHUB_REPO) -fi -if [ -z "$USER_REMOTE" ]; then - >&2 echo "No user remote found. You need to 'git push' your branch." - exit 2 -fi - - -git push $USER_REMOTE -browser https://github.com/$REPO/compare/docker:release...$GITHUB_USER:$BRANCH?expand=1 +echo "Push branch to docker remote" +git push $REMOTE +browser https://github.com/$REPO/compare/docker:release...$BRANCH?expand=1 diff --git a/script/release/push-release b/script/release/push-release index 7d9ec0a2c..33d0d7772 100755 --- a/script/release/push-release +++ b/script/release/push-release @@ -57,7 +57,7 @@ docker push docker/compose:$VERSION echo "Uploading sdist to pypi" pandoc -f markdown -t rst README.md -o README.rst sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst -./script/write-git-sha +./script/build/write-git-sha python setup.py sdist if [ "$(command -v twine 2> /dev/null)" ]; then twine upload ./dist/docker-compose-${VERSION/-/}.tar.gz diff --git a/script/run.ps1 b/script/run/run.ps1 similarity index 92% rename from script/run.ps1 rename to script/run/run.ps1 index f4ff2abb6..47ec54692 100644 --- a/script/run.ps1 +++ b/script/run/run.ps1 @@ -5,7 +5,7 @@ # $Env:DOCKER_COMPOSE_OPTIONS. if ($Env:DOCKER_COMPOSE_VERSION -eq $null -or $Env:DOCKER_COMPOSE_VERSION.Length -eq 0) { - $Env:DOCKER_COMPOSE_VERSION = "1.6.0rc1" + $Env:DOCKER_COMPOSE_VERSION = "latest" } if ($Env:DOCKER_COMPOSE_OPTIONS -eq $null) { diff --git a/script/run.sh b/script/run/run.sh similarity index 98% rename from script/run.sh rename to script/run/run.sh index 212f9b977..98d32c5f8 100755 --- a/script/run.sh +++ b/script/run/run.sh @@ -15,7 +15,7 @@ set -e -VERSION="1.6.2" +VERSION="1.7.0" IMAGE="docker/compose:$VERSION" diff --git a/script/prepare-osx b/script/setup/osx similarity index 100% rename from script/prepare-osx rename to script/setup/osx diff --git a/script/shell b/script/shell deleted file mode 100755 index 903be76fc..000000000 --- a/script/shell +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -set -ex -docker build -t docker-compose . 
-exec docker run -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`:/code -ti --rm --entrypoint bash docker-compose diff --git a/script/test-versions b/script/test/all similarity index 95% rename from script/test-versions rename to script/test/all index 2e9c91674..08bf16188 100755 --- a/script/test-versions +++ b/script/test/all @@ -6,6 +6,7 @@ set -e >&2 echo "Running lint checks" docker run --rm \ + --tty \ ${GIT_VOLUME} \ --entrypoint="tox" \ "$TAG" -e pre-commit @@ -13,7 +14,7 @@ docker run --rm \ get_versions="docker run --rm --entrypoint=/code/.tox/py27/bin/python $TAG - /code/script/versions.py docker/docker" + /code/script/test/versions.py docker/docker" if [ "$DOCKER_VERSIONS" == "" ]; then DOCKER_VERSIONS="$($get_versions default)" @@ -51,6 +52,7 @@ for version in $DOCKER_VERSIONS; do docker run \ --rm \ + --tty \ --link="$daemon_container:docker" \ --env="DOCKER_HOST=tcp://docker:2375" \ --env="DOCKER_VERSION=$version" \ diff --git a/script/test/ci b/script/test/ci new file mode 100755 index 000000000..c5927b2c9 --- /dev/null +++ b/script/test/ci @@ -0,0 +1,25 @@ +#!/bin/bash +# This should be run inside a container built from the Dockerfile +# at the root of the repo: +# +# $ TAG="docker-compose:$(git rev-parse --short HEAD)" +# $ docker build -t "$TAG" . +# $ docker run --rm \ +# --volume="/var/run/docker.sock:/var/run/docker.sock" \ +# --volume="$(pwd)/.git:/code/.git" \ +# -e "TAG=$TAG" \ +# --entrypoint="script/test/ci" "$TAG" + +set -ex + +docker version + +export DOCKER_VERSIONS=all +STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} +export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER" + +GIT_VOLUME="--volumes-from=$(hostname)" +. script/test/all + +>&2 echo "Building Linux binary" +. script/build/linux-entrypoint diff --git a/script/test b/script/test/default similarity index 92% rename from script/test rename to script/test/default index bdb3579b0..fa741a19d 100755 --- a/script/test +++ b/script/test/default @@ -12,4 +12,4 @@ mkdir -p coverage-html docker build -t "$TAG" . GIT_VOLUME="--volume=$(pwd)/.git:/code/.git" -. script/test-versions +. 
script/test/all diff --git a/script/versions.py b/script/test/versions.py similarity index 100% rename from script/versions.py rename to script/test/versions.py diff --git a/script/travis/bintray.json.tmpl b/script/travis/bintray.json.tmpl index 7d0adbebc..f9728558a 100644 --- a/script/travis/bintray.json.tmpl +++ b/script/travis/bintray.json.tmpl @@ -1,7 +1,7 @@ { "package": { "name": "${TRAVIS_OS_NAME}", - "repo": "master", + "repo": "${TRAVIS_BRANCH}", "subject": "docker-compose", "desc": "Automated build of master branch from travis ci.", "website_url": "https://github.com/docker/compose", @@ -11,8 +11,8 @@ }, "version": { - "name": "master", - "desc": "Automated build of the master branch.", + "name": "${TRAVIS_BRANCH}", + "desc": "Automated build of the ${TRAVIS_BRANCH} branch.", "released": "${DATE}", "vcs_tag": "master" }, diff --git a/script/travis/build-binary b/script/travis/build-binary index 7cc1092dd..7707a1eee 100755 --- a/script/travis/build-binary +++ b/script/travis/build-binary @@ -3,11 +3,11 @@ set -ex if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then - script/build-linux + script/build/linux # TODO: requires auth to push, so disable for now - # script/build-image master + # script/build/image master # docker push docker/compose:master else - script/prepare-osx - script/build-osx + script/setup/osx + script/build/osx fi diff --git a/setup.py b/setup.py index df4172ce6..7caae97d2 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ install_requires = [ 'requests >= 2.6.1, < 2.8', 'texttable >= 0.8.1, < 0.9', 'websocket-client >= 0.32.0, < 1.0', - 'docker-py >= 1.7.0, < 2', + 'docker-py > 1.7.2, < 2', 'dockerpty >= 0.4.1, < 0.5', 'six >= 1.3.0, < 2', 'jsonschema >= 2.5.1, < 3', diff --git a/tests/acceptance/cli_test.py b/tests/acceptance/cli_test.py index 28f5155aa..707c24926 100644 --- a/tests/acceptance/cli_test.py +++ b/tests/acceptance/cli_test.py @@ -8,6 +8,7 @@ import shlex import signal import subprocess import time +from collections import Counter from collections import namedtuple from operator import attrgetter @@ -17,6 +18,7 @@ from docker import errors from .. 
import mock
 from compose.cli.command import get_project
 from compose.container import Container
+from compose.project import OneOffFilter
 from tests.integration.testcases import DockerClientTestCase
 from tests.integration.testcases import get_links
 from tests.integration.testcases import pull_busybox
@@ -77,21 +79,20 @@ class ContainerCountCondition(object):
 
 class ContainerStateCondition(object):
 
-    def __init__(self, client, name, running):
+    def __init__(self, client, name, status):
         self.client = client
         self.name = name
-        self.running = running
+        self.status = status
 
     def __call__(self):
         try:
            container = self.client.inspect_container(self.name)
-            return container['State']['Running'] == self.running
+            return container['State']['Status'] == self.status
         except errors.APIError:
             return False
 
     def __str__(self):
-        state = 'running' if self.running else 'stopped'
-        return "waiting for container to be %s" % state
+        return "waiting for container to be %s" % self.status
 
 
 class CLITestCase(DockerClientTestCase):
@@ -105,7 +106,7 @@ class CLITestCase(DockerClientTestCase):
         self.project.kill()
         self.project.remove_stopped()
 
-        for container in self.project.containers(stopped=True, one_off=True):
+        for container in self.project.containers(stopped=True, one_off=OneOffFilter.only):
             container.remove(force=True)
 
         networks = self.client.networks()
@@ -365,14 +366,22 @@ class CLITestCase(DockerClientTestCase):
     @v2_only()
     def test_down(self):
         self.base_dir = 'tests/fixtures/v2-full'
+
         self.dispatch(['up', '-d'])
         wait_on_condition(ContainerCountCondition(self.project, 2))
 
+        self.dispatch(['run', 'web', 'true'])
+        self.dispatch(['run', '-d', 'web', 'tail', '-f', '/dev/null'])
+        assert len(self.project.containers(one_off=OneOffFilter.only, stopped=True)) == 2
+
         result = self.dispatch(['down', '--rmi=local', '--volumes'])
         assert 'Stopping v2full_web_1' in result.stderr
         assert 'Stopping v2full_other_1' in result.stderr
+        assert 'Stopping v2full_web_run_2' in result.stderr
         assert 'Removing v2full_web_1' in result.stderr
         assert 'Removing v2full_other_1' in result.stderr
+        assert 'Removing v2full_web_run_1' in result.stderr
+        assert 'Removing v2full_web_run_2' in result.stderr
         assert 'Removing volume v2full_data' in result.stderr
         assert 'Removing image v2full_web' in result.stderr
         assert 'Removing image busybox' not in result.stderr
@@ -396,8 +405,10 @@ class CLITestCase(DockerClientTestCase):
         self.base_dir = 'tests/fixtures/echo-services'
         result = self.dispatch(['up', '--no-color'])
 
-        assert 'simple_1 | simple' in result.stdout
-        assert 'another_1 | another' in result.stdout
+        assert 'simple_1  | simple' in result.stdout
+        assert 'another_1 | another' in result.stdout
+        assert 'simple_1 exited with code 0' in result.stdout
+        assert 'another_1 exited with code 0' in result.stdout
 
     @v2_only()
     def test_up(self):
@@ -473,6 +484,30 @@ class CLITestCase(DockerClientTestCase):
         assert 'forward_facing' in front_aliases
         assert 'ahead' in front_aliases
 
+    @v2_only()
+    def test_up_with_network_static_addresses(self):
+        filename = 'network-static-addresses.yml'
+        ipv4_address = '172.16.100.100'
+        ipv6_address = 'fe80::1001:100'
+        self.base_dir = 'tests/fixtures/networks'
+        self.dispatch(['-f', filename, 'up', '-d'], None)
+        static_net = '{}_static_test'.format(self.project.name)
+
+        networks = [
+            n for n in self.client.networks()
+            if n['Name'].startswith('{}_'.format(self.project.name))
+        ]
+
+        # One network was created: static_test
+        assert sorted(n['Name'] for n in networks) == [static_net]
+        web_container =
self.project.get_service('web').containers()[0] + + ipam_config = web_container.get( + 'NetworkSettings.Networks.{}.IPAMConfig'.format(static_net) + ) + assert ipv4_address in ipam_config.values() + assert ipv6_address in ipam_config.values() + @v2_only() def test_up_with_networks(self): self.base_dir = 'tests/fixtures/networks' @@ -752,13 +787,31 @@ class CLITestCase(DockerClientTestCase): self.project.stop(['simple']) wait_on_condition(ContainerCountCondition(self.project, 0)) + def test_exec_without_tty(self): + self.base_dir = 'tests/fixtures/links-composefile' + self.dispatch(['up', '-d', 'console']) + self.assertEqual(len(self.project.containers()), 1) + + stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/']) + self.assertEquals(stdout, "/\n") + self.assertEquals(stderr, "") + + def test_exec_custom_user(self): + self.base_dir = 'tests/fixtures/links-composefile' + self.dispatch(['up', '-d', 'console']) + self.assertEqual(len(self.project.containers()), 1) + + stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami']) + self.assertEquals(stdout, "operator\n") + self.assertEquals(stderr, "") + def test_run_service_without_links(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['run', 'console', '/bin/true']) self.assertEqual(len(self.project.containers()), 0) # Ensure stdin/out was open - container = self.project.containers(stopped=True, one_off=True)[0] + container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0] config = container.inspect()['Config'] self.assertTrue(config['AttachStderr']) self.assertTrue(config['AttachStdout']) @@ -808,7 +861,7 @@ class CLITestCase(DockerClientTestCase): self.dispatch(['run', 'implicit']) service = self.project.get_service('implicit') - containers = service.containers(stopped=True, one_off=True) + containers = service.containers(stopped=True, one_off=OneOffFilter.only) self.assertEqual( [c.human_readable_command for c in containers], [u'/bin/sh -c echo "success"'], @@ -816,7 +869,7 @@ class CLITestCase(DockerClientTestCase): self.dispatch(['run', 'explicit']) service = self.project.get_service('explicit') - containers = service.containers(stopped=True, one_off=True) + containers = service.containers(stopped=True, one_off=OneOffFilter.only) self.assertEqual( [c.human_readable_command for c in containers], [u'/bin/true'], @@ -827,7 +880,7 @@ class CLITestCase(DockerClientTestCase): name = 'service' self.dispatch(['run', '--entrypoint', '/bin/echo', name, 'helloworld']) service = self.project.get_service(name) - container = service.containers(stopped=True, one_off=True)[0] + container = service.containers(stopped=True, one_off=OneOffFilter.only)[0] self.assertEqual( shlex.split(container.human_readable_command), [u'/bin/echo', u'helloworld'], @@ -839,7 +892,7 @@ class CLITestCase(DockerClientTestCase): user = 'sshd' self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1) service = self.project.get_service(name) - container = service.containers(stopped=True, one_off=True)[0] + container = service.containers(stopped=True, one_off=OneOffFilter.only)[0] self.assertEqual(user, container.get('Config.User')) def test_run_service_with_user_overridden_short_form(self): @@ -848,7 +901,7 @@ class CLITestCase(DockerClientTestCase): user = 'sshd' self.dispatch(['run', '-u', user, name], returncode=1) service = self.project.get_service(name) - container = service.containers(stopped=True, one_off=True)[0] + container = service.containers(stopped=True, 
one_off=OneOffFilter.only)[0] self.assertEqual(user, container.get('Config.User')) def test_run_service_with_environement_overridden(self): @@ -862,7 +915,7 @@ class CLITestCase(DockerClientTestCase): '/bin/true', ]) service = self.project.get_service(name) - container = service.containers(stopped=True, one_off=True)[0] + container = service.containers(stopped=True, one_off=OneOffFilter.only)[0] # env overriden self.assertEqual('notbar', container.environment['foo']) # keep environement from yaml @@ -876,7 +929,7 @@ class CLITestCase(DockerClientTestCase): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch(['run', '-d', 'simple']) - container = self.project.get_service('simple').containers(one_off=True)[0] + container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_random = container.get_local_port(3000) @@ -893,7 +946,7 @@ class CLITestCase(DockerClientTestCase): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch(['run', '-d', '--service-ports', 'simple']) - container = self.project.get_service('simple').containers(one_off=True)[0] + container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_random = container.get_local_port(3000) @@ -914,7 +967,7 @@ class CLITestCase(DockerClientTestCase): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple']) - container = self.project.get_service('simple').containers(one_off=True)[0] + container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_short = container.get_local_port(3000) @@ -930,8 +983,13 @@ class CLITestCase(DockerClientTestCase): def test_run_service_with_explicitly_maped_ip_ports(self): # create one off container self.base_dir = 'tests/fixtures/ports-composefile' - self.dispatch(['run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple'], None) - container = self.project.get_service('simple').containers(one_off=True)[0] + self.dispatch([ + 'run', '-d', + '-p', '127.0.0.1:30000:3000', + '--publish', '127.0.0.1:30001:3001', + 'simple' + ]) + container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] # get port information port_short = container.get_local_port(3000) @@ -948,7 +1006,7 @@ class CLITestCase(DockerClientTestCase): # create one off container self.base_dir = 'tests/fixtures/expose-composefile' self.dispatch(['run', '-d', '--service-ports', 'simple']) - container = self.project.get_service('simple').containers(one_off=True)[0] + container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0] ports = container.ports self.assertEqual(len(ports), 9) @@ -972,9 +1030,27 @@ class CLITestCase(DockerClientTestCase): self.dispatch(['run', '--name', name, 'service', '/bin/true']) service = self.project.get_service('service') - container, = service.containers(stopped=True, one_off=True) + container, = service.containers(stopped=True, one_off=OneOffFilter.only) self.assertEqual(container.name, name) + def test_run_service_with_workdir_overridden(self): + self.base_dir = 'tests/fixtures/run-workdir' + name = 'service' + workdir = '/var' + self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name]) + service = self.project.get_service(name) + container = service.containers(stopped=True, 
one_off=OneOffFilter.only)[0]
+        self.assertEqual(workdir, container.get('Config.WorkingDir'))
+
+    def test_run_service_with_workdir_overridden_short_form(self):
+        self.base_dir = 'tests/fixtures/run-workdir'
+        name = 'service'
+        workdir = '/var'
+        self.dispatch(['run', '-w', workdir, name])
+        service = self.project.get_service(name)
+        container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
+        self.assertEqual(workdir, container.get('Config.WorkingDir'))
+
     @v2_only()
     def test_run_interactive_connects_to_network(self):
         self.base_dir = 'tests/fixtures/networks'
@@ -984,7 +1060,7 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['run', 'app', 'nslookup', 'db'])
 
         containers = self.project.get_service('app').containers(
-            stopped=True, one_off=True)
+            stopped=True, one_off=OneOffFilter.only)
         assert len(containers) == 2
 
         for container in containers:
@@ -1004,7 +1080,7 @@
         self.dispatch(['up', '-d'])
         self.dispatch(['run', '-d', 'app', 'top'])
 
-        container = self.project.get_service('app').containers(one_off=True)[0]
+        container = self.project.get_service('app').containers(one_off=OneOffFilter.only)[0]
         networks = container.get('NetworkSettings.Networks')
 
         assert sorted(list(networks)) == [
@@ -1023,26 +1099,26 @@
         wait_on_condition(ContainerStateCondition(
             self.project.client,
             'simplecomposefile_simple_run_1',
-            running=True))
+            'running'))
 
         os.kill(proc.pid, signal.SIGINT)
         wait_on_condition(ContainerStateCondition(
             self.project.client,
             'simplecomposefile_simple_run_1',
-            running=False))
+            'exited'))
 
     def test_run_handles_sigterm(self):
         proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
         wait_on_condition(ContainerStateCondition(
             self.project.client,
             'simplecomposefile_simple_run_1',
-            running=True))
+            'running'))
 
         os.kill(proc.pid, signal.SIGTERM)
         wait_on_condition(ContainerStateCondition(
             self.project.client,
             'simplecomposefile_simple_run_1',
-            running=False))
+            'exited'))
 
     def test_rm(self):
         service = self.project.get_service('simple')
@@ -1058,6 +1134,28 @@
         self.dispatch(['rm', '-f'], None)
         self.assertEqual(len(service.containers(stopped=True)), 0)
 
+    def test_rm_all(self):
+        service = self.project.get_service('simple')
+        service.create_container(one_off=False)
+        service.create_container(one_off=True)
+        kill_service(service)
+        self.assertEqual(len(service.containers(stopped=True)), 1)
+        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+        self.dispatch(['rm', '-f'], None)
+        self.assertEqual(len(service.containers(stopped=True)), 0)
+        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+        self.dispatch(['rm', '-f', '-a'], None)
+        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
+        service.create_container(one_off=False)
+        service.create_container(one_off=True)
+        kill_service(service)
+        self.assertEqual(len(service.containers(stopped=True)), 1)
+        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+        self.dispatch(['rm', '-f', '--all'], None)
+        self.assertEqual(len(service.containers(stopped=True)), 0)
+        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
     def test_stop(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
@@ -1136,6 +1234,69 @@ class CLITestCase(DockerClientTestCase):
 
     def test_logs_invalid_service_name(self):
         self.dispatch(['logs', 'madeupname'],
returncode=1) + def test_logs_follow(self): + self.base_dir = 'tests/fixtures/echo-services' + self.dispatch(['up', '-d']) + + result = self.dispatch(['logs', '-f']) + + assert result.stdout.count('\n') == 5 + assert 'simple' in result.stdout + assert 'another' in result.stdout + assert 'exited with code 0' in result.stdout + + def test_logs_follow_logs_from_new_containers(self): + self.base_dir = 'tests/fixtures/logs-composefile' + self.dispatch(['up', '-d', 'simple']) + + proc = start_process(self.base_dir, ['logs', '-f']) + + self.dispatch(['up', '-d', 'another']) + wait_on_condition(ContainerStateCondition( + self.project.client, + 'logscomposefile_another_1', + 'exited')) + + # sleep for a short period to allow the tailing thread to receive the + # event. This is not great, but there isn't an easy way to do this + # without being able to stream stdout from the process. + time.sleep(0.5) + os.kill(proc.pid, signal.SIGINT) + result = wait_on_process(proc, returncode=1) + assert 'test' in result.stdout + + def test_logs_default(self): + self.base_dir = 'tests/fixtures/logs-composefile' + self.dispatch(['up', '-d']) + + result = self.dispatch(['logs']) + assert 'hello' in result.stdout + assert 'test' in result.stdout + assert 'exited with' not in result.stdout + + def test_logs_on_stopped_containers_exits(self): + self.base_dir = 'tests/fixtures/echo-services' + self.dispatch(['up']) + + result = self.dispatch(['logs']) + assert 'simple' in result.stdout + assert 'another' in result.stdout + assert 'exited with' not in result.stdout + + def test_logs_timestamps(self): + self.base_dir = 'tests/fixtures/echo-services' + self.dispatch(['up', '-d']) + + result = self.dispatch(['logs', '-f', '-t']) + self.assertRegexpMatches(result.stdout, '(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})') + + def test_logs_tail(self): + self.base_dir = 'tests/fixtures/logs-tail-composefile' + self.dispatch(['up']) + + result = self.dispatch(['logs', '--tail', '2']) + assert result.stdout.count('\n') == 3 + def test_kill(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') @@ -1261,7 +1422,7 @@ class CLITestCase(DockerClientTestCase): os.kill(events_proc.pid, signal.SIGINT) result = wait_on_process(events_proc, returncode=1) lines = [json.loads(line) for line in result.stdout.rstrip().split('\n')] - assert [e['action'] for e in lines] == ['create', 'start', 'create', 'start'] + assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2} def test_events_human_readable(self): events_proc = start_process(self.base_dir, ['events']) diff --git a/tests/fixtures/default-env-file/.env b/tests/fixtures/default-env-file/.env new file mode 100644 index 000000000..996c886cb --- /dev/null +++ b/tests/fixtures/default-env-file/.env @@ -0,0 +1,4 @@ +IMAGE=alpine:latest +COMMAND=true +PORT1=5643 +PORT2=9999 \ No newline at end of file diff --git a/tests/fixtures/default-env-file/docker-compose.yml b/tests/fixtures/default-env-file/docker-compose.yml new file mode 100644 index 000000000..aa8e4409e --- /dev/null +++ b/tests/fixtures/default-env-file/docker-compose.yml @@ -0,0 +1,6 @@ +web: + image: ${IMAGE} + command: ${COMMAND} + ports: + - $PORT1 + - $PORT2 diff --git a/tests/fixtures/logs-composefile/docker-compose.yml b/tests/fixtures/logs-composefile/docker-compose.yml new file mode 100644 index 000000000..0af9d805c --- /dev/null +++ b/tests/fixtures/logs-composefile/docker-compose.yml @@ -0,0 +1,6 @@ +simple: + image: busybox:latest + command: sh -c "echo hello && sleep 200" 
+another: + image: busybox:latest + command: sh -c "echo test" diff --git a/tests/fixtures/logs-tail-composefile/docker-compose.yml b/tests/fixtures/logs-tail-composefile/docker-compose.yml new file mode 100644 index 000000000..80d8feaec --- /dev/null +++ b/tests/fixtures/logs-tail-composefile/docker-compose.yml @@ -0,0 +1,3 @@ +simple: + image: busybox:latest + command: sh -c "echo a && echo b && echo c && echo d" diff --git a/tests/fixtures/networks/network-static-addresses.yml b/tests/fixtures/networks/network-static-addresses.yml new file mode 100755 index 000000000..f820ff6a4 --- /dev/null +++ b/tests/fixtures/networks/network-static-addresses.yml @@ -0,0 +1,23 @@ +version: "2" + +services: + web: + image: busybox + command: top + networks: + static_test: + ipv4_address: 172.16.100.100 + ipv6_address: fe80::1001:100 + +networks: + static_test: + driver: bridge + driver_opts: + com.docker.network.enable_ipv6: "true" + ipam: + driver: default + config: + - subnet: 172.16.100.0/24 + gateway: 172.16.100.1 + - subnet: fe80::/64 + gateway: fe80::1001:1 diff --git a/tests/fixtures/run-workdir/docker-compose.yml b/tests/fixtures/run-workdir/docker-compose.yml new file mode 100644 index 000000000..dc3ea86a0 --- /dev/null +++ b/tests/fixtures/run-workdir/docker-compose.yml @@ -0,0 +1,4 @@ +service: + image: busybox:latest + working_dir: /etc + command: /bin/true diff --git a/tests/fixtures/tls/ca.pem b/tests/fixtures/tls/ca.pem new file mode 100644 index 000000000..e69de29bb diff --git a/tests/fixtures/tls/cert.pem b/tests/fixtures/tls/cert.pem new file mode 100644 index 000000000..e69de29bb diff --git a/tests/fixtures/tls/key.key b/tests/fixtures/tls/key.key new file mode 100644 index 000000000..e69de29bb diff --git a/tests/helpers.py b/tests/helpers.py new file mode 100644 index 000000000..4b422a6a0 --- /dev/null +++ b/tests/helpers.py @@ -0,0 +1,17 @@ +from __future__ import absolute_import +from __future__ import unicode_literals + +from compose.config.config import ConfigDetails +from compose.config.config import ConfigFile +from compose.config.config import load + + +def build_config(contents, **kwargs): + return load(build_config_details(contents, **kwargs)) + + +def build_config_details(contents, working_dir='working_dir', filename='filename.yml'): + return ConfigDetails( + working_dir, + [ConfigFile(filename, contents)], + ) diff --git a/tests/integration/project_test.py b/tests/integration/project_test.py index 6542fa18e..d1732d1e4 100644 --- a/tests/integration/project_test.py +++ b/tests/integration/project_test.py @@ -7,6 +7,8 @@ import py import pytest from docker.errors import NotFound +from .. 
import mock +from ..helpers import build_config from .testcases import DockerClientTestCase from compose.config import config from compose.config import ConfigurationError @@ -14,19 +16,13 @@ from compose.config.config import V2_0 from compose.config.types import VolumeFromSpec from compose.config.types import VolumeSpec from compose.const import LABEL_PROJECT +from compose.const import LABEL_SERVICE from compose.container import Container from compose.project import Project from compose.service import ConvergenceStrategy from tests.integration.testcases import v2_only -def build_service_dicts(service_config): - return config.load( - config.ConfigDetails( - 'working_dir', - [config.ConfigFile(None, service_config)])) - - class ProjectTest(DockerClientTestCase): def test_containers(self): @@ -67,19 +63,18 @@ class ProjectTest(DockerClientTestCase): ) def test_volumes_from_service(self): - service_dicts = build_service_dicts({ - 'data': { - 'image': 'busybox:latest', - 'volumes': ['/var/data'], - }, - 'db': { - 'image': 'busybox:latest', - 'volumes_from': ['data'], - }, - }) project = Project.from_config( name='composetest', - config_data=service_dicts, + config_data=build_config({ + 'data': { + 'image': 'busybox:latest', + 'volumes': ['/var/data'], + }, + 'db': { + 'image': 'busybox:latest', + 'volumes_from': ['data'], + }, + }), client=self.client, ) db = project.get_service('db') @@ -96,7 +91,7 @@ class ProjectTest(DockerClientTestCase): ) project = Project.from_config( name='composetest', - config_data=build_service_dicts({ + config_data=build_config({ 'db': { 'image': 'busybox:latest', 'volumes_from': ['composetest_data_container'], @@ -112,7 +107,7 @@ class ProjectTest(DockerClientTestCase): project = Project.from_config( name='composetest', client=self.client, - config_data=build_service_dicts({ + config_data=build_config({ 'version': V2_0, 'services': { 'net': { @@ -139,7 +134,7 @@ class ProjectTest(DockerClientTestCase): def get_project(): return Project.from_config( name='composetest', - config_data=build_service_dicts({ + config_data=build_config({ 'version': V2_0, 'services': { 'web': { @@ -174,7 +169,7 @@ class ProjectTest(DockerClientTestCase): def test_net_from_service_v1(self): project = Project.from_config( name='composetest', - config_data=build_service_dicts({ + config_data=build_config({ 'net': { 'image': 'busybox:latest', 'command': ["top"] @@ -198,7 +193,7 @@ class ProjectTest(DockerClientTestCase): def get_project(): return Project.from_config( name='composetest', - config_data=build_service_dicts({ + config_data=build_config({ 'web': { 'image': 'busybox:latest', 'net': 'container:composetest_net_container' @@ -242,19 +237,24 @@ class ProjectTest(DockerClientTestCase): db_container = db.create_container() project.start(service_names=['web']) - self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name])) + self.assertEqual( + set(c.name for c in project.containers()), + set([web_container_1.name, web_container_2.name])) project.start() - self.assertEqual(set(c.name for c in project.containers()), - set([web_container_1.name, web_container_2.name, db_container.name])) + self.assertEqual( + set(c.name for c in project.containers()), + set([web_container_1.name, web_container_2.name, db_container.name])) project.pause(service_names=['web']) - self.assertEqual(set([c.name for c in project.containers() if c.is_paused]), - set([web_container_1.name, web_container_2.name])) + self.assertEqual( + set([c.name for c in 
project.containers() if c.is_paused]), + set([web_container_1.name, web_container_2.name])) project.pause() - self.assertEqual(set([c.name for c in project.containers() if c.is_paused]), - set([web_container_1.name, web_container_2.name, db_container.name])) + self.assertEqual( + set([c.name for c in project.containers() if c.is_paused]), + set([web_container_1.name, web_container_2.name, db_container.name])) project.unpause(service_names=['db']) self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 2) @@ -464,7 +464,7 @@ class ProjectTest(DockerClientTestCase): def test_project_up_starts_depends(self): project = Project.from_config( name='composetest', - config_data=build_service_dicts({ + config_data=build_config({ 'console': { 'image': 'busybox:latest', 'command': ["top"], @@ -499,7 +499,7 @@ class ProjectTest(DockerClientTestCase): def test_project_up_with_no_deps(self): project = Project.from_config( name='composetest', - config_data=build_service_dicts({ + config_data=build_config({ 'console': { 'image': 'busybox:latest', 'command': ["top"], @@ -652,6 +652,96 @@ class ProjectTest(DockerClientTestCase): }], } + @v2_only() + def test_up_with_network_static_addresses(self): + config_data = config.Config( + version=V2_0, + services=[{ + 'name': 'web', + 'image': 'busybox:latest', + 'command': 'top', + 'networks': { + 'static_test': { + 'ipv4_address': '172.16.100.100', + 'ipv6_address': 'fe80::1001:102' + } + }, + }], + volumes={}, + networks={ + 'static_test': { + 'driver': 'bridge', + 'driver_opts': { + "com.docker.network.enable_ipv6": "true", + }, + 'ipam': { + 'driver': 'default', + 'config': [ + {"subnet": "172.16.100.0/24", + "gateway": "172.16.100.1"}, + {"subnet": "fe80::/64", + "gateway": "fe80::1001:1"} + ] + } + } + } + ) + project = Project.from_config( + client=self.client, + name='composetest', + config_data=config_data, + ) + project.up(detached=True) + + network = self.client.networks(names=['static_test'])[0] + service_container = project.get_service('web').containers()[0] + + assert network['Options'] == { + "com.docker.network.enable_ipv6": "true" + } + + IPAMConfig = (service_container.inspect().get('NetworkSettings', {}). + get('Networks', {}).get('composetest_static_test', {}). 
+ get('IPAMConfig', {})) + assert IPAMConfig.get('IPv4Address') == '172.16.100.100' + assert IPAMConfig.get('IPv6Address') == 'fe80::1001:102' + + @v2_only() + def test_up_with_network_static_addresses_missing_subnet(self): + config_data = config.Config( + version=V2_0, + services=[{ + 'name': 'web', + 'image': 'busybox:latest', + 'networks': { + 'static_test': { + 'ipv4_address': '172.16.100.100', + 'ipv6_address': 'fe80::1001:101' + } + }, + }], + volumes={}, + networks={ + 'static_test': { + 'driver': 'bridge', + 'driver_opts': { + "com.docker.network.enable_ipv6": "true", + }, + 'ipam': { + 'driver': 'default', + }, + }, + }, + ) + + project = Project.from_config( + client=self.client, + name='composetest', + config_data=config_data, + ) + + assert len(project.up()) == 0 + @v2_only() def test_project_up_volumes(self): vol_name = '{0:x}'.format(random.getrandbits(32)) @@ -841,6 +931,44 @@ class ProjectTest(DockerClientTestCase): vol_name ) in str(e.exception) + @v2_only() + def test_initialize_volumes_updated_blank_driver(self): + vol_name = '{0:x}'.format(random.getrandbits(32)) + full_vol_name = 'composetest_{0}'.format(vol_name) + + config_data = config.Config( + version=V2_0, + services=[{ + 'name': 'web', + 'image': 'busybox:latest', + 'command': 'top' + }], + volumes={vol_name: {'driver': 'local'}}, + networks={}, + ) + project = Project.from_config( + name='composetest', + config_data=config_data, client=self.client + ) + project.volumes.initialize() + + volume_data = self.client.inspect_volume(full_vol_name) + self.assertEqual(volume_data['Name'], full_vol_name) + self.assertEqual(volume_data['Driver'], 'local') + + config_data = config_data._replace( + volumes={vol_name: {}} + ) + project = Project.from_config( + name='composetest', + config_data=config_data, + client=self.client + ) + project.volumes.initialize() + volume_data = self.client.inspect_volume(full_vol_name) + self.assertEqual(volume_data['Name'], full_vol_name) + self.assertEqual(volume_data['Driver'], 'local') + @v2_only() def test_initialize_volumes_external_volumes(self): # Use composetest_ prefix so it gets garbage-collected in tearDown() @@ -930,3 +1058,40 @@ class ProjectTest(DockerClientTestCase): container = service.get_container() assert [mount['Name'] for mount in container.get('Mounts')] == [full_vol_name] assert next((v for v in engine_volumes if v['Name'] == vol_name), None) is None + + def test_project_up_orphans(self): + config_dict = { + 'service1': { + 'image': 'busybox:latest', + 'command': 'top', + } + } + + config_data = build_config(config_dict) + project = Project.from_config( + name='composetest', config_data=config_data, client=self.client + ) + project.up() + config_dict['service2'] = config_dict['service1'] + del config_dict['service1'] + + config_data = build_config(config_dict) + project = Project.from_config( + name='composetest', config_data=config_data, client=self.client + ) + with mock.patch('compose.project.log') as mock_log: + project.up() + + mock_log.warning.assert_called_once_with(mock.ANY) + + assert len([ + ctnr for ctnr in project._labeled_containers() + if ctnr.labels.get(LABEL_SERVICE) == 'service1' + ]) == 1 + + project.up(remove_orphans=True) + + assert len([ + ctnr for ctnr in project._labeled_containers() + if ctnr.labels.get(LABEL_SERVICE) == 'service1' + ]) == 0 diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py index bbfcd8ec9..df50d513a 100644 --- a/tests/integration/service_test.py +++ b/tests/integration/service_test.py @@ -6,6 
+6,7 @@ import shutil import tempfile from os import path +import pytest from docker.errors import APIError from six import StringIO from six import text_type @@ -24,10 +25,12 @@ from compose.const import LABEL_PROJECT from compose.const import LABEL_SERVICE from compose.const import LABEL_VERSION from compose.container import Container +from compose.project import OneOffFilter from compose.service import ConvergencePlan from compose.service import ConvergenceStrategy from compose.service import NetworkMode from compose.service import Service +from tests.integration.testcases import v2_only def create_and_start_container(service, **override_options): @@ -60,7 +63,7 @@ class ServiceTest(DockerClientTestCase): db = self.create_service('db') container = db.create_container(one_off=True) self.assertEqual(db.containers(stopped=True), []) - self.assertEqual(db.containers(one_off=True, stopped=True), [container]) + self.assertEqual(db.containers(one_off=OneOffFilter.only, stopped=True), [container]) def test_project_is_added_to_container_name(self): service = self.create_service('web') @@ -102,6 +105,13 @@ class ServiceTest(DockerClientTestCase): container.start() self.assertEqual(container.get('HostConfig.CpuQuota'), 40000) + def test_create_container_with_shm_size(self): + self.require_api_version('1.22') + service = self.create_service('db', shm_size=67108864) + container = service.create_container() + service.start_container(container) + self.assertEqual(container.get('HostConfig.ShmSize'), 67108864) + def test_create_container_with_extra_hosts_list(self): extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229'] service = self.create_service('db', extra_hosts=extra_hosts) @@ -128,7 +138,7 @@ class ServiceTest(DockerClientTestCase): service = self.create_service('db', read_only=read_only) container = service.create_container() service.start_container(container) - self.assertEqual(container.get('HostConfig.ReadonlyRootfs'), read_only, container.get('HostConfig')) + assert container.get('HostConfig.ReadonlyRootfs') == read_only def test_create_container_with_security_opt(self): security_opt = ['label:disable'] @@ -402,7 +412,9 @@ class ServiceTest(DockerClientTestCase): self.assertEqual(len(service.containers()), 0) self.assertEqual(len(service.containers(stopped=True)), 1) - containers = service.execute_convergence_plan(ConvergencePlan('recreate', containers), start=False) + containers = service.execute_convergence_plan( + ConvergencePlan('recreate', containers), + start=False) self.assertEqual(len(service.containers()), 0) self.assertEqual(len(service.containers(stopped=True)), 1) @@ -485,7 +497,7 @@ class ServiceTest(DockerClientTestCase): create_and_start_container(db) create_and_start_container(db) - c = create_and_start_container(db, one_off=True) + c = create_and_start_container(db, one_off=OneOffFilter.only) self.assertEqual( set(get_links(c)), @@ -726,7 +738,7 @@ class ServiceTest(DockerClientTestCase): self.assertEqual(len(service.containers()), 1) self.assertTrue(service.containers()[0].is_running) - self.assertIn("ERROR: for 2 Boom", mock_stderr.getvalue()) + self.assertIn("ERROR: for composetest_web_2 Boom", mock_stderr.getvalue()) def test_scale_with_unexpected_exception(self): """Test that when scaling if the API returns an error, that is not of type @@ -757,17 +769,17 @@ class ServiceTest(DockerClientTestCase): container = service.create_container(number=next_number, quiet=True) container.start() - self.assertTrue(container.is_running) - 
self.assertEqual(len(service.containers()), 1) + container.inspect() + assert container.is_running + assert len(service.containers()) == 1 service.scale(1) - - self.assertEqual(len(service.containers()), 1) + assert len(service.containers()) == 1 container.inspect() - self.assertTrue(container.is_running) + assert container.is_running captured_output = mock_log.info.call_args[0] - self.assertIn('Desired container number already achieved', captured_output) + assert 'Desired container number already achieved' in captured_output @mock.patch('compose.service.log') def test_scale_with_custom_container_name_outputs_warning(self, mock_log): @@ -775,7 +787,7 @@ class ServiceTest(DockerClientTestCase): results in warning output. """ service = self.create_service('app', container_name='custom-container') - self.assertEqual(service.custom_container_name(), 'custom-container') + self.assertEqual(service.custom_container_name, 'custom-container') service.scale(3) @@ -793,7 +805,9 @@ class ServiceTest(DockerClientTestCase): containers = service.containers() self.assertEqual(len(containers), 2) for container in containers: - self.assertEqual(list(container.inspect()['HostConfig']['PortBindings'].keys()), ['8000/tcp']) + self.assertEqual( + list(container.get('HostConfig.PortBindings')), + ['8000/tcp']) def test_scale_with_immediate_exit(self): service = self.create_service('web', image='busybox', command='true') @@ -864,13 +878,21 @@ class ServiceTest(DockerClientTestCase): container = create_and_start_container(service) self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com']) + @v2_only() + def test_tmpfs(self): + service = self.create_service('web', tmpfs=['/run']) + container = create_and_start_container(service) + self.assertEqual(container.get('HostConfig.Tmpfs'), {'/run': ''}) + def test_working_dir_param(self): service = self.create_service('container', working_dir='/working/dir/sample') container = service.create_container() self.assertEqual(container.get('Config.WorkingDir'), '/working/dir/sample') def test_split_env(self): - service = self.create_service('web', environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS=']) + service = self.create_service( + 'web', + environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS=']) env = create_and_start_container(service).environment for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items(): self.assertEqual(env[k], v) @@ -956,7 +978,7 @@ class ServiceTest(DockerClientTestCase): def test_custom_container_name(self): service = self.create_service('web', container_name='my-web-container') - self.assertEqual(service.custom_container_name(), 'my-web-container') + self.assertEqual(service.custom_container_name, 'my-web-container') container = create_and_start_container(service) self.assertEqual(container.name, 'my-web-container') @@ -964,6 +986,7 @@ class ServiceTest(DockerClientTestCase): one_off_container = service.create_container(one_off=True) self.assertNotEqual(one_off_container.name, 'my-web-container') + @pytest.mark.skipif(True, reason="Broken on 1.11.0rc1") def test_log_drive_invalid(self): service = self.create_service('web', logging={'driver': 'xxx'}) expected_error_msg = "logger: no log driver named 'xxx' is registered" @@ -1014,12 +1037,10 @@ class ServiceTest(DockerClientTestCase): self.assertEqual(set(service.duplicate_containers()), set([duplicate])) -def converge(service, - strategy=ConvergenceStrategy.changed, - do_build=True): +def converge(service, 
strategy=ConvergenceStrategy.changed): """Create a converge plan from a strategy and execute the plan.""" plan = service.convergence_plan(strategy) - return service.execute_convergence_plan(plan, do_build=do_build, timeout=1) + return service.execute_convergence_plan(plan, timeout=1) class ConfigHashTest(DockerClientTestCase): diff --git a/tests/integration/state_test.py b/tests/integration/state_test.py index 36099d2dd..07b28e784 100644 --- a/tests/integration/state_test.py +++ b/tests/integration/state_test.py @@ -38,8 +38,8 @@ class BasicProjectTest(ProjectTestCase): super(BasicProjectTest, self).setUp() self.cfg = { - 'db': {'image': 'busybox:latest'}, - 'web': {'image': 'busybox:latest'}, + 'db': {'image': 'busybox:latest', 'command': 'top'}, + 'web': {'image': 'busybox:latest', 'command': 'top'}, } def test_no_change(self): diff --git a/tests/integration/testcases.py b/tests/integration/testcases.py index 8e2f25937..8d69d5319 100644 --- a/tests/integration/testcases.py +++ b/tests/integration/testcases.py @@ -12,6 +12,7 @@ from compose.cli.docker_client import docker_client from compose.config.config import resolve_environment from compose.config.config import V1 from compose.config.config import V2_0 +from compose.config.environment import Environment from compose.const import API_VERSIONS from compose.const import LABEL_PROJECT from compose.progress_stream import stream_output @@ -60,7 +61,7 @@ class DockerClientTestCase(unittest.TestCase): else: version = API_VERSIONS[V2_0] - cls.client = docker_client(version) + cls.client = docker_client(Environment(), version) def tearDown(self): for c in self.client.containers( @@ -89,7 +90,9 @@ class DockerClientTestCase(unittest.TestCase): if 'command' not in kwargs: kwargs['command'] = ["top"] - kwargs['environment'] = resolve_environment(kwargs) + kwargs['environment'] = resolve_environment( + kwargs, Environment.from_env_file(None) + ) labels = dict(kwargs.setdefault('labels', {})) labels['com.docker.compose.test-name'] = self.id() diff --git a/tests/unit/cli/command_test.py b/tests/unit/cli/command_test.py index 180446721..3502d6369 100644 --- a/tests/unit/cli/command_test.py +++ b/tests/unit/cli/command_test.py @@ -1,23 +1,48 @@ from __future__ import absolute_import from __future__ import unicode_literals +import os + import pytest -from requests.exceptions import ConnectionError -from compose.cli import errors -from compose.cli.command import friendly_error_message +from compose.cli.command import get_config_path_from_options +from compose.config.environment import Environment +from compose.const import IS_WINDOWS_PLATFORM from tests import mock -from tests import unittest -class FriendlyErrorMessageTestCase(unittest.TestCase): +class TestGetConfigPathFromOptions(object): - def test_dispatch_generic_connection_error(self): - with pytest.raises(errors.ConnectionErrorGeneric): - with mock.patch( - 'compose.cli.command.call_silently', - autospec=True, - side_effect=[0, 1] - ): - with friendly_error_message(): - raise ConnectionError() + def test_path_from_options(self): + paths = ['one.yml', 'two.yml'] + opts = {'--file': paths} + environment = Environment.from_env_file('.') + assert get_config_path_from_options('.', opts, environment) == paths + + def test_single_path_from_env(self): + with mock.patch.dict(os.environ): + os.environ['COMPOSE_FILE'] = 'one.yml' + environment = Environment.from_env_file('.') + assert get_config_path_from_options('.', {}, environment) == ['one.yml'] + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix 
separator') + def test_multiple_path_from_env(self): + with mock.patch.dict(os.environ): + os.environ['COMPOSE_FILE'] = 'one.yml:two.yml' + environment = Environment.from_env_file('.') + assert get_config_path_from_options( + '.', {}, environment + ) == ['one.yml', 'two.yml'] + + @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows separator') + def test_multiple_path_from_env_windows(self): + with mock.patch.dict(os.environ): + os.environ['COMPOSE_FILE'] = 'one.yml;two.yml' + environment = Environment.from_env_file('.') + assert get_config_path_from_options( + '.', {}, environment + ) == ['one.yml', 'two.yml'] + + def test_no_path(self): + environment = Environment.from_env_file('.') + assert not get_config_path_from_options('.', {}, environment) diff --git a/tests/unit/cli/docker_client_test.py b/tests/unit/cli/docker_client_test.py index d497495b4..5334a9440 100644 --- a/tests/unit/cli/docker_client_test.py +++ b/tests/unit/cli/docker_client_test.py @@ -3,7 +3,11 @@ from __future__ import unicode_literals import os -from compose.cli import docker_client +import docker +import pytest + +from compose.cli.docker_client import docker_client +from compose.cli.docker_client import tls_config_from_options from tests import mock from tests import unittest @@ -13,10 +17,95 @@ class DockerClientTestCase(unittest.TestCase): def test_docker_client_no_home(self): with mock.patch.dict(os.environ): del os.environ['HOME'] - docker_client.docker_client() + docker_client(os.environ) def test_docker_client_with_custom_timeout(self): timeout = 300 with mock.patch('compose.cli.docker_client.HTTP_TIMEOUT', 300): - client = docker_client.docker_client() + client = docker_client(os.environ) self.assertEqual(client.timeout, int(timeout)) + + +class TLSConfigTestCase(unittest.TestCase): + ca_cert = 'tests/fixtures/tls/ca.pem' + client_cert = 'tests/fixtures/tls/cert.pem' + key = 'tests/fixtures/tls/key.key' + + def test_simple_tls(self): + options = {'--tls': True} + result = tls_config_from_options(options) + assert result is True + + def test_tls_ca_cert(self): + options = { + '--tlscacert': self.ca_cert, '--tlsverify': True + } + result = tls_config_from_options(options) + assert isinstance(result, docker.tls.TLSConfig) + assert result.ca_cert == options['--tlscacert'] + assert result.verify is True + + def test_tls_ca_cert_explicit(self): + options = { + '--tlscacert': self.ca_cert, '--tls': True, + '--tlsverify': True + } + result = tls_config_from_options(options) + assert isinstance(result, docker.tls.TLSConfig) + assert result.ca_cert == options['--tlscacert'] + assert result.verify is True + + def test_tls_client_cert(self): + options = { + '--tlscert': self.client_cert, '--tlskey': self.key + } + result = tls_config_from_options(options) + assert isinstance(result, docker.tls.TLSConfig) + assert result.cert == (options['--tlscert'], options['--tlskey']) + + def test_tls_client_cert_explicit(self): + options = { + '--tlscert': self.client_cert, '--tlskey': self.key, + '--tls': True + } + result = tls_config_from_options(options) + assert isinstance(result, docker.tls.TLSConfig) + assert result.cert == (options['--tlscert'], options['--tlskey']) + + def test_tls_client_and_ca(self): + options = { + '--tlscert': self.client_cert, '--tlskey': self.key, + '--tlsverify': True, '--tlscacert': self.ca_cert + } + result = tls_config_from_options(options) + assert isinstance(result, docker.tls.TLSConfig) + assert result.cert == (options['--tlscert'], options['--tlskey']) + assert result.ca_cert == 
options['--tlscacert'] + assert result.verify is True + + def test_tls_client_and_ca_explicit(self): + options = { + '--tlscert': self.client_cert, '--tlskey': self.key, + '--tlsverify': True, '--tlscacert': self.ca_cert, + '--tls': True + } + result = tls_config_from_options(options) + assert isinstance(result, docker.tls.TLSConfig) + assert result.cert == (options['--tlscert'], options['--tlskey']) + assert result.ca_cert == options['--tlscacert'] + assert result.verify is True + + def test_tls_client_missing_key(self): + options = {'--tlscert': self.client_cert} + with pytest.raises(docker.errors.TLSParameterError): + tls_config_from_options(options) + + options = {'--tlskey': self.key} + with pytest.raises(docker.errors.TLSParameterError): + tls_config_from_options(options) + + def test_assert_hostname_explicit_skip(self): + options = {'--tlscacert': self.ca_cert, '--skip-hostname-check': True} + result = tls_config_from_options(options) + assert isinstance(result, docker.tls.TLSConfig) + assert result.assert_hostname is False diff --git a/tests/unit/cli/errors_test.py b/tests/unit/cli/errors_test.py new file mode 100644 index 000000000..71fa9dee5 --- /dev/null +++ b/tests/unit/cli/errors_test.py @@ -0,0 +1,51 @@ +from __future__ import absolute_import +from __future__ import unicode_literals + +import pytest +from docker.errors import APIError +from requests.exceptions import ConnectionError + +from compose.cli import errors +from compose.cli.errors import handle_connection_errors +from tests import mock + + +@pytest.yield_fixture +def mock_logging(): + with mock.patch('compose.cli.errors.log', autospec=True) as mock_log: + yield mock_log + + +def patch_call_silently(side_effect): + return mock.patch( + 'compose.cli.errors.call_silently', + autospec=True, + side_effect=side_effect) + + +class TestHandleConnectionErrors(object): + + def test_generic_connection_error(self, mock_logging): + with pytest.raises(errors.ConnectionError): + with patch_call_silently([0, 1]): + with handle_connection_errors(mock.Mock()): + raise ConnectionError() + + _, args, _ = mock_logging.error.mock_calls[0] + assert "Couldn't connect to Docker daemon at" in args[0] + + def test_api_error_version_mismatch(self, mock_logging): + with pytest.raises(errors.ConnectionError): + with handle_connection_errors(mock.Mock(api_version='1.22')): + raise APIError(None, None, b"client is newer than server") + + _, args, _ = mock_logging.error.mock_calls[0] + assert "Docker Engine of version 1.10.0 or greater" in args[0] + + def test_api_error_version_other(self, mock_logging): + msg = b"Something broke!" 
+ with pytest.raises(errors.ConnectionError): + with handle_connection_errors(mock.Mock(api_version='1.22')): + raise APIError(None, None, msg) + + mock_logging.error.assert_called_once_with(msg) diff --git a/tests/unit/cli/log_printer_test.py b/tests/unit/cli/log_printer_test.py index 5b04226cf..ab48eefc0 100644 --- a/tests/unit/cli/log_printer_test.py +++ b/tests/unit/cli/log_printer_test.py @@ -1,27 +1,23 @@ from __future__ import absolute_import from __future__ import unicode_literals +import itertools + import pytest import six +from six.moves.queue import Queue -from compose.cli.log_printer import LogPrinter +from compose.cli.log_printer import build_log_generator +from compose.cli.log_printer import build_log_presenters +from compose.cli.log_printer import build_no_log_generator +from compose.cli.log_printer import consume_queue +from compose.cli.log_printer import QueueItem from compose.cli.log_printer import wait_on_exit +from compose.cli.log_printer import watch_events from compose.container import Container from tests import mock -def build_mock_container(reader): - return mock.Mock( - spec=Container, - name='myapp_web_1', - name_without_project='web_1', - has_api_logs=True, - log_stream=None, - attach=reader, - wait=mock.Mock(return_value=0), - ) - - @pytest.fixture def output_stream(): output = six.StringIO() @@ -31,58 +27,151 @@ def output_stream(): @pytest.fixture def mock_container(): - def reader(*args, **kwargs): - yield b"hello\nworld" - return build_mock_container(reader) + return mock.Mock(spec=Container, name_without_project='web_1') -class TestLogPrinter(object): +class TestLogPresenter(object): - def test_single_container(self, output_stream, mock_container): - LogPrinter([mock_container], output=output_stream).run() + def test_monochrome(self, mock_container): + presenters = build_log_presenters(['foo', 'bar'], True) + presenter = next(presenters) + actual = presenter.present(mock_container, "this line") + assert actual == "web_1 | this line" - output = output_stream.getvalue() - assert 'hello' in output - assert 'world' in output - # Call count is 2 lines + "container exited line" - assert output_stream.flush.call_count == 3 + def test_polychrome(self, mock_container): + presenters = build_log_presenters(['foo', 'bar'], False) + presenter = next(presenters) + actual = presenter.present(mock_container, "this line") + assert '\033[' in actual - def test_monochrome(self, output_stream, mock_container): - LogPrinter([mock_container], output=output_stream, monochrome=True).run() - assert '\033[' not in output_stream.getvalue() - def test_polychrome(self, output_stream, mock_container): - LogPrinter([mock_container], output=output_stream).run() - assert '\033[' in output_stream.getvalue() +def test_wait_on_exit(): + exit_status = 3 + mock_container = mock.Mock( + spec=Container, + name='cname', + wait=mock.Mock(return_value=exit_status)) + + expected = '{} exited with code {}\n'.format(mock_container.name, exit_status) + assert expected == wait_on_exit(mock_container) + + +def test_build_no_log_generator(mock_container): + mock_container.has_api_logs = False + mock_container.log_driver = 'none' + output, = build_no_log_generator(mock_container, None) + assert "WARNING: no logs are available with the 'none' log driver\n" in output + assert "exited with code" not in output + + +class TestBuildLogGenerator(object): + + def test_no_log_stream(self, mock_container): + mock_container.log_stream = None + mock_container.logs.return_value = iter([b"hello\nworld"]) + log_args = 
{'follow': True} + + generator = build_log_generator(mock_container, log_args) + assert next(generator) == "hello\n" + assert next(generator) == "world" + mock_container.logs.assert_called_once_with( + stdout=True, + stderr=True, + stream=True, + **log_args) + + def test_with_log_stream(self, mock_container): + mock_container.log_stream = iter([b"hello\nworld"]) + log_args = {'follow': True} + + generator = build_log_generator(mock_container, log_args) + assert next(generator) == "hello\n" + assert next(generator) == "world" def test_unicode(self, output_stream): - glyph = u'\u2022' + glyph = u'\u2022\n' + mock_container.log_stream = iter([glyph.encode('utf-8')]) - def reader(*args, **kwargs): - yield glyph.encode('utf-8') + b'\n' + generator = build_log_generator(mock_container, {}) + assert next(generator) == glyph - container = build_mock_container(reader) - LogPrinter([container], output=output_stream).run() - output = output_stream.getvalue() - if six.PY2: - output = output.decode('utf-8') - assert glyph in output +@pytest.fixture +def thread_map(): + return {'cid': mock.Mock()} - def test_wait_on_exit(self): - exit_status = 3 - mock_container = mock.Mock( - spec=Container, - name='cname', - wait=mock.Mock(return_value=exit_status)) - expected = '{} exited with code {}\n'.format(mock_container.name, exit_status) - assert expected == wait_on_exit(mock_container) +@pytest.fixture +def mock_presenters(): + return itertools.cycle([mock.Mock()]) - def test_generator_with_no_logs(self, mock_container, output_stream): - mock_container.has_api_logs = False - mock_container.log_driver = 'none' - LogPrinter([mock_container], output=output_stream).run() - output = output_stream.getvalue() - assert "WARNING: no logs are available with the 'none' log driver\n" in output +class TestWatchEvents(object): + + def test_stop_event(self, thread_map, mock_presenters): + event_stream = [{'action': 'stop', 'id': 'cid'}] + watch_events(thread_map, event_stream, mock_presenters, ()) + assert not thread_map + + def test_start_event(self, thread_map, mock_presenters): + container_id = 'abcd' + event = {'action': 'start', 'id': container_id, 'container': mock.Mock()} + event_stream = [event] + thread_args = 'foo', 'bar' + + with mock.patch( + 'compose.cli.log_printer.build_thread', + autospec=True + ) as mock_build_thread: + watch_events(thread_map, event_stream, mock_presenters, thread_args) + mock_build_thread.assert_called_once_with( + event['container'], + next(mock_presenters), + *thread_args) + assert container_id in thread_map + + def test_other_event(self, thread_map, mock_presenters): + container_id = 'abcd' + event_stream = [{'action': 'create', 'id': container_id}] + watch_events(thread_map, event_stream, mock_presenters, ()) + assert container_id not in thread_map + + +class TestConsumeQueue(object): + + def test_item_is_an_exception(self): + + class Problem(Exception): + pass + + queue = Queue() + error = Problem('oops') + for item in QueueItem.new('a'), QueueItem.new('b'), QueueItem.exception(error): + queue.put(item) + + generator = consume_queue(queue, False) + assert next(generator) == 'a' + assert next(generator) == 'b' + with pytest.raises(Problem): + next(generator) + + def test_item_is_stop_without_cascade_stop(self): + queue = Queue() + for item in QueueItem.stop(), QueueItem.new('a'), QueueItem.new('b'): + queue.put(item) + + generator = consume_queue(queue, False) + assert next(generator) == 'a' + assert next(generator) == 'b' + + def test_item_is_stop_with_cascade_stop(self): + queue = 
Queue() + for item in QueueItem.stop(), QueueItem.new('a'), QueueItem.new('b'): + queue.put(item) + + assert list(consume_queue(queue, True)) == [] + + def test_item_is_none_when_timeout_is_hit(self): + queue = Queue() + generator = consume_queue(queue, False) + assert next(generator) is None diff --git a/tests/unit/cli/main_test.py b/tests/unit/cli/main_test.py index fd6c50028..dc5278800 100644 --- a/tests/unit/cli/main_test.py +++ b/tests/unit/cli/main_test.py @@ -3,15 +3,16 @@ from __future__ import unicode_literals import logging +import pytest + from compose import container from compose.cli.errors import UserError from compose.cli.formatter import ConsoleWarningFormatter -from compose.cli.main import build_log_printer from compose.cli.main import convergence_strategy_from_opts +from compose.cli.main import filter_containers_to_service_names from compose.cli.main import setup_console_handler from compose.service import ConvergenceStrategy from tests import mock -from tests import unittest def mock_container(service, number): @@ -22,9 +23,16 @@ def mock_container(service, number): name_without_project='{0}_{1}'.format(service, number)) -class CLIMainTestCase(unittest.TestCase): +@pytest.fixture +def logging_handler(): + stream = mock.Mock() + stream.isatty.return_value = True + return logging.StreamHandler(stream=stream) - def test_build_log_printer(self): + +class TestCLIMainTestCase(object): + + def test_filter_containers_to_service_names(self): containers = [ mock_container('web', 1), mock_container('web', 2), @@ -33,69 +41,64 @@ class CLIMainTestCase(unittest.TestCase): mock_container('another', 1), ] service_names = ['web', 'db'] - log_printer = build_log_printer(containers, service_names, True, False) - self.assertEqual(log_printer.containers, containers[:3]) + actual = filter_containers_to_service_names(containers, service_names) + assert actual == containers[:3] - def test_build_log_printer_all_services(self): + def test_filter_containers_to_service_names_all(self): containers = [ mock_container('web', 1), mock_container('db', 1), mock_container('other', 1), ] service_names = [] - log_printer = build_log_printer(containers, service_names, True, False) - self.assertEqual(log_printer.containers, containers) + actual = filter_containers_to_service_names(containers, service_names) + assert actual == containers -class SetupConsoleHandlerTestCase(unittest.TestCase): +class TestSetupConsoleHandlerTestCase(object): - def setUp(self): - self.stream = mock.Mock() - self.stream.isatty.return_value = True - self.handler = logging.StreamHandler(stream=self.stream) + def test_with_tty_verbose(self, logging_handler): + setup_console_handler(logging_handler, True) + assert type(logging_handler.formatter) == ConsoleWarningFormatter + assert '%(name)s' in logging_handler.formatter._fmt + assert '%(funcName)s' in logging_handler.formatter._fmt - def test_with_tty_verbose(self): - setup_console_handler(self.handler, True) - assert type(self.handler.formatter) == ConsoleWarningFormatter - assert '%(name)s' in self.handler.formatter._fmt - assert '%(funcName)s' in self.handler.formatter._fmt + def test_with_tty_not_verbose(self, logging_handler): + setup_console_handler(logging_handler, False) + assert type(logging_handler.formatter) == ConsoleWarningFormatter + assert '%(name)s' not in logging_handler.formatter._fmt + assert '%(funcName)s' not in logging_handler.formatter._fmt - def test_with_tty_not_verbose(self): - setup_console_handler(self.handler, False) - assert type(self.handler.formatter) 
== ConsoleWarningFormatter - assert '%(name)s' not in self.handler.formatter._fmt - assert '%(funcName)s' not in self.handler.formatter._fmt - - def test_with_not_a_tty(self): - self.stream.isatty.return_value = False - setup_console_handler(self.handler, False) - assert type(self.handler.formatter) == logging.Formatter + def test_with_not_a_tty(self, logging_handler): + logging_handler.stream.isatty.return_value = False + setup_console_handler(logging_handler, False) + assert type(logging_handler.formatter) == logging.Formatter -class ConvergeStrategyFromOptsTestCase(unittest.TestCase): +class TestConvergeStrategyFromOptsTestCase(object): def test_invalid_opts(self): options = {'--force-recreate': True, '--no-recreate': True} - with self.assertRaises(UserError): + with pytest.raises(UserError): convergence_strategy_from_opts(options) def test_always(self): options = {'--force-recreate': True, '--no-recreate': False} - self.assertEqual( - convergence_strategy_from_opts(options), + assert ( + convergence_strategy_from_opts(options) == ConvergenceStrategy.always ) def test_never(self): options = {'--force-recreate': False, '--no-recreate': True} - self.assertEqual( - convergence_strategy_from_opts(options), + assert ( + convergence_strategy_from_opts(options) == ConvergenceStrategy.never ) def test_changed(self): options = {'--force-recreate': False, '--no-recreate': False} - self.assertEqual( - convergence_strategy_from_opts(options), + assert ( + convergence_strategy_from_opts(options) == ConvergenceStrategy.changed ) diff --git a/tests/unit/cli_test.py b/tests/unit/cli_test.py index 26ae4e300..bd35dc06f 100644 --- a/tests/unit/cli_test.py +++ b/tests/unit/cli_test.py @@ -3,6 +3,8 @@ from __future__ import absolute_import from __future__ import unicode_literals import os +import shutil +import tempfile import docker import py @@ -10,13 +12,14 @@ import pytest from .. import mock from .. 
import unittest +from ..helpers import build_config from compose.cli.command import get_project from compose.cli.command import get_project_name from compose.cli.docopt_command import NoSuchCommand from compose.cli.errors import UserError from compose.cli.main import TopLevelCommand from compose.const import IS_WINDOWS_PLATFORM -from compose.service import Service +from compose.project import Project class CLITestCase(unittest.TestCase): @@ -42,11 +45,11 @@ class CLITestCase(unittest.TestCase): project_name = get_project_name(None, project_name=name) self.assertEquals('explicitprojectname', project_name) + @mock.patch.dict(os.environ) def test_project_name_from_environment_new_var(self): name = 'namefromenv' - with mock.patch.dict(os.environ): - os.environ['COMPOSE_PROJECT_NAME'] = name - project_name = get_project_name(None) + os.environ['COMPOSE_PROJECT_NAME'] = name + project_name = get_project_name(None) self.assertEquals(project_name, name) def test_project_name_with_empty_environment_var(self): @@ -56,6 +59,22 @@ class CLITestCase(unittest.TestCase): project_name = get_project_name(base_dir) self.assertEquals('simplecomposefile', project_name) + @mock.patch.dict(os.environ) + def test_project_name_with_environment_file(self): + base_dir = tempfile.mkdtemp() + try: + name = 'namefromenvfile' + with open(os.path.join(base_dir, '.env'), 'w') as f: + f.write('COMPOSE_PROJECT_NAME={}'.format(name)) + project_name = get_project_name(base_dir) + assert project_name == name + + # Environment has priority over .env file + os.environ['COMPOSE_PROJECT_NAME'] = 'namefromenv' + assert get_project_name(base_dir) == os.environ['COMPOSE_PROJECT_NAME'] + finally: + shutil.rmtree(base_dir) + def test_get_project(self): base_dir = 'tests/fixtures/longer-filename-composefile' project = get_project(base_dir) @@ -63,39 +82,35 @@ class CLITestCase(unittest.TestCase): self.assertTrue(project.client) self.assertTrue(project.services) - def test_help(self): - command = TopLevelCommand() - with self.assertRaises(SystemExit): - command.dispatch(['-h'], None) - def test_command_help(self): - with self.assertRaises(SystemExit) as ctx: - TopLevelCommand().dispatch(['help', 'up'], None) + with pytest.raises(SystemExit) as exc: + TopLevelCommand.help({'COMMAND': 'up'}) - self.assertIn('Usage: up', str(ctx.exception)) + assert 'Usage: up' in exc.exconly() def test_command_help_nonexistent(self): - with self.assertRaises(NoSuchCommand): - TopLevelCommand().dispatch(['help', 'nonexistent'], None) + with pytest.raises(NoSuchCommand): + TopLevelCommand.help({'COMMAND': 'nonexistent'}) @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty") @mock.patch('compose.cli.main.RunOperation', autospec=True) @mock.patch('compose.cli.main.PseudoTerminal', autospec=True) def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation): - command = TopLevelCommand() mock_client = mock.create_autospec(docker.Client) - mock_project = mock.Mock(client=mock_client) - mock_project.get_service.return_value = Service( - 'service', + project = Project.from_config( + name='composetest', client=mock_client, - environment=['FOO=ONE', 'BAR=TWO'], - image='someimage') + config_data=build_config({ + 'service': {'image': 'busybox'} + }), + ) + command = TopLevelCommand(project) with pytest.raises(SystemExit): - command.run(mock_project, { + command.run({ 'SERVICE': 'service', 'COMMAND': None, - '-e': ['BAR=NEW', 'OTHER=bär'.encode('utf-8')], + '-e': [], '--user': None, '--no-deps': None, '-d': False, @@ 
-105,54 +120,28 @@ class CLITestCase(unittest.TestCase): '--publish': [], '--rm': None, '--name': None, + '--workdir': None, }) _, _, call_kwargs = mock_run_operation.mock_calls[0] assert call_kwargs['logs'] is False - @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty") - @mock.patch('compose.cli.main.PseudoTerminal', autospec=True) - def test_run_with_environment_merged_with_options_list(self, mock_pseudo_terminal): - command = TopLevelCommand() + def test_run_service_with_restart_always(self): mock_client = mock.create_autospec(docker.Client) - mock_project = mock.Mock(client=mock_client) - mock_project.get_service.return_value = Service( - 'service', + + project = Project.from_config( + name='composetest', client=mock_client, - environment=['FOO=ONE', 'BAR=TWO'], - image='someimage') - - command.run(mock_project, { - 'SERVICE': 'service', - 'COMMAND': None, - '-e': ['BAR=NEW', 'OTHER=bär'.encode('utf-8')], - '--user': None, - '--no-deps': None, - '-d': True, - '-T': None, - '--entrypoint': None, - '--service-ports': None, - '--publish': [], - '--rm': None, - '--name': None, - }) - - _, _, call_kwargs = mock_client.create_container.mock_calls[0] - assert ( - sorted(call_kwargs['environment']) == - sorted(['FOO=ONE', 'BAR=NEW', 'OTHER=bär']) + config_data=build_config({ + 'service': { + 'image': 'busybox', + 'restart': 'always', + } + }), ) - def test_run_service_with_restart_always(self): - command = TopLevelCommand() - mock_client = mock.create_autospec(docker.Client) - mock_project = mock.Mock(client=mock_client) - mock_project.get_service.return_value = Service( - 'service', - client=mock_client, - restart={'Name': 'always', 'MaximumRetryCount': 0}, - image='someimage') - command.run(mock_project, { + command = TopLevelCommand(project) + command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], @@ -165,6 +154,7 @@ class CLITestCase(unittest.TestCase): '--publish': [], '--rm': None, '--name': None, + '--workdir': None, }) self.assertEquals( @@ -172,15 +162,8 @@ class CLITestCase(unittest.TestCase): 'always' ) - command = TopLevelCommand() - mock_client = mock.create_autospec(docker.Client) - mock_project = mock.Mock(client=mock_client) - mock_project.get_service.return_value = Service( - 'service', - client=mock_client, - restart='always', - image='someimage') - command.run(mock_project, { + command = TopLevelCommand(project) + command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], @@ -193,6 +176,7 @@ class CLITestCase(unittest.TestCase): '--publish': [], '--rm': True, '--name': None, + '--workdir': None, }) self.assertFalse( @@ -200,18 +184,17 @@ class CLITestCase(unittest.TestCase): ) def test_command_manula_and_service_ports_together(self): - command = TopLevelCommand() - mock_client = mock.create_autospec(docker.Client) - mock_project = mock.Mock(client=mock_client) - mock_project.get_service.return_value = Service( - 'service', - client=mock_client, - restart='always', - image='someimage', + project = Project.from_config( + name='composetest', + client=None, + config_data=build_config({ + 'service': {'image': 'busybox'}, + }), ) + command = TopLevelCommand(project) with self.assertRaises(UserError): - command.run(mock_project, { + command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py index 11bc7f0b7..2bbbe6145 100644 --- a/tests/unit/config/config_test.py +++ b/tests/unit/config/config_test.py @@ -11,11 +11,13 @@ from operator import itemgetter import py import 
pytest +from ...helpers import build_config_details from compose.config import config from compose.config.config import resolve_build_args from compose.config.config import resolve_environment from compose.config.config import V1 from compose.config.config import V2_0 +from compose.config.environment import Environment from compose.config.errors import ConfigurationError from compose.config.errors import VERSION_EXPLANATION from compose.config.types import VolumeSpec @@ -35,7 +37,9 @@ def make_service_dict(name, service_dict, working_dir, filename=None): filename=filename, name=name, config=service_dict), - config.ConfigFile(filename=filename, config={})) + config.ConfigFile(filename=filename, config={}), + environment=Environment.from_env_file(working_dir) + ) return config.process_service(resolver.run()) @@ -43,12 +47,6 @@ def service_sort(services): return sorted(services, key=itemgetter('name')) -def build_config_details(contents, working_dir='working_dir', filename='filename.yml'): - return config.ConfigDetails( - working_dir, - [config.ConfigFile(filename, contents)]) - - class ConfigTest(unittest.TestCase): def test_load(self): service_dicts = config.load( @@ -342,20 +340,17 @@ class ConfigTest(unittest.TestCase): for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']: with pytest.raises(ConfigurationError) as exc: config.load(build_config_details( - {invalid_name: {'image': 'busybox'}}, - 'working_dir', - 'filename.yml')) + {invalid_name: {'image': 'busybox'}})) assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly() - def test_config_invalid_service_names_v2(self): + def test_load_config_invalid_service_names_v2(self): for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']: with pytest.raises(ConfigurationError) as exc: - config.load( - build_config_details({ + config.load(build_config_details( + { 'version': '2', - 'services': {invalid_name: {'image': 'busybox'}} - }, 'working_dir', 'filename.yml') - ) + 'services': {invalid_name: {'image': 'busybox'}}, + })) assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly() def test_load_with_invalid_field_name(self): @@ -412,7 +407,7 @@ class ConfigTest(unittest.TestCase): config.load(config_details) assert ( "services.web.build.args contains an invalid type, it should be an " - "array, or an object" in exc.exconly() + "object, or an array" in exc.exconly() ) def test_config_integer_service_name_raise_validation_error(self): @@ -697,6 +692,31 @@ class ConfigTest(unittest.TestCase): assert service['build']['args']['opt1'] == '42' assert service['build']['args']['opt2'] == 'foobar' + def test_build_args_allow_empty_properties(self): + service = config.load( + build_config_details( + { + 'version': '2', + 'services': { + 'web': { + 'build': { + 'context': '.', + 'dockerfile': 'Dockerfile-alt', + 'args': { + 'foo': None + } + } + } + } + }, + 'tests/fixtures/extends', + 'filename.yml' + ) + ).services[0] + assert 'args' in service['build'] + assert 'foo' in service['build']['args'] + assert service['build']['args']['foo'] == 'None' + def test_load_with_multiple_files_mismatched_networks_format(self): base_file = config.ConfigFile( 'base.yaml', @@ -1098,22 +1118,18 @@ class ConfigTest(unittest.TestCase): ).services self.assertEqual(service[0]['entrypoint'], entrypoint) - @mock.patch('compose.config.validation.log') - def test_logs_warning_for_boolean_in_environment(self, mock_logging): - expected_warning_msg = "There is a boolean value in the 'environment'" - config.load( - build_config_details( - 
{'web': { - 'image': 'busybox', - 'environment': {'SHOW_STUFF': True} - }}, - 'working_dir', - 'filename.yml' - ) - ) + def test_logs_warning_for_boolean_in_environment(self): + config_details = build_config_details({ + 'web': { + 'image': 'busybox', + 'environment': {'SHOW_STUFF': True} + } + }) - assert mock_logging.warn.called - assert expected_warning_msg in mock_logging.warn.call_args[0][0] + with pytest.raises(ConfigurationError) as exc: + config.load(config_details) + + assert "contains true, which is an invalid type" in exc.exconly() def test_config_valid_environment_dict_key_contains_dashes(self): services = config.load( @@ -1181,6 +1197,24 @@ class ConfigTest(unittest.TestCase): } ] + def test_tmpfs_option(self): + actual = config.load(build_config_details({ + 'version': '2', + 'services': { + 'web': { + 'image': 'alpine', + 'tmpfs': '/run', + } + } + })) + assert actual.services == [ + { + 'name': 'web', + 'image': 'alpine', + 'tmpfs': ['/run'], + } + ] + def test_merge_service_dicts_from_files_with_extends_in_base(self): base = { 'volumes': ['.:/app'], @@ -1251,6 +1285,24 @@ class ConfigTest(unittest.TestCase): } } + def test_merge_logging_v1(self): + base = { + 'image': 'alpine:edge', + 'log_driver': 'something', + 'log_opt': {'foo': 'three'}, + } + override = { + 'image': 'alpine:edge', + 'command': 'true', + } + actual = config.merge_service_dicts(base, override, V1) + assert actual == { + 'image': 'alpine:edge', + 'log_driver': 'something', + 'log_opt': {'foo': 'three'}, + 'command': 'true', + } + def test_external_volume_config(self): config_details = build_config_details({ 'version': '2', @@ -1317,7 +1369,7 @@ class ConfigTest(unittest.TestCase): }) with pytest.raises(ConfigurationError) as exc: config.load(config_details) - assert 'one.build is invalid, context is required.' 
in exc.exconly() + assert 'has neither an image nor a build context' in exc.exconly() class NetworkModeTest(unittest.TestCase): @@ -1532,8 +1584,25 @@ class PortsTest(unittest.TestCase): class InterpolationTest(unittest.TestCase): + @mock.patch.dict(os.environ) + def test_config_file_with_environment_file(self): + project_dir = 'tests/fixtures/default-env-file' + service_dicts = config.load( + config.find( + project_dir, None, Environment.from_env_file(project_dir) + ) + ).services + + self.assertEqual(service_dicts[0], { + 'name': 'web', + 'image': 'alpine:latest', + 'ports': ['5643', '9999'], + 'command': 'true' + }) + @mock.patch.dict(os.environ) def test_config_file_with_environment_variable(self): + project_dir = 'tests/fixtures/environment-interpolation' os.environ.update( IMAGE="busybox", HOST_PORT="80", @@ -1541,7 +1610,9 @@ class InterpolationTest(unittest.TestCase): ) service_dicts = config.load( - config.find('tests/fixtures/environment-interpolation', None), + config.find( + project_dir, None, Environment.from_env_file(project_dir) + ) ).services self.assertEqual(service_dicts, [ @@ -1571,7 +1642,7 @@ class InterpolationTest(unittest.TestCase): None, ) - with mock.patch('compose.config.interpolation.log') as log: + with mock.patch('compose.config.environment.log') as log: config.load(config_details) self.assertEqual(2, log.warn.call_count) @@ -1652,24 +1723,42 @@ class VolumeConfigTest(unittest.TestCase): @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths') def test_relative_path_does_expand_posix(self): - d = make_service_dict('foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='/home/me/myproject') + d = make_service_dict( + 'foo', + {'build': '.', 'volumes': ['./data:/data']}, + working_dir='/home/me/myproject') self.assertEqual(d['volumes'], ['/home/me/myproject/data:/data']) - d = make_service_dict('foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='/home/me/myproject') + d = make_service_dict( + 'foo', + {'build': '.', 'volumes': ['.:/data']}, + working_dir='/home/me/myproject') self.assertEqual(d['volumes'], ['/home/me/myproject:/data']) - d = make_service_dict('foo', {'build': '.', 'volumes': ['../otherproject:/data']}, working_dir='/home/me/myproject') + d = make_service_dict( + 'foo', + {'build': '.', 'volumes': ['../otherproject:/data']}, + working_dir='/home/me/myproject') self.assertEqual(d['volumes'], ['/home/me/otherproject:/data']) @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows paths') def test_relative_path_does_expand_windows(self): - d = make_service_dict('foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='c:\\Users\\me\\myproject') + d = make_service_dict( + 'foo', + {'build': '.', 'volumes': ['./data:/data']}, + working_dir='c:\\Users\\me\\myproject') self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject\\data:/data']) - d = make_service_dict('foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='c:\\Users\\me\\myproject') + d = make_service_dict( + 'foo', + {'build': '.', 'volumes': ['.:/data']}, + working_dir='c:\\Users\\me\\myproject') self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject:/data']) - d = make_service_dict('foo', {'build': '.', 'volumes': ['../otherproject:/data']}, working_dir='c:\\Users\\me\\myproject') + d = make_service_dict( + 'foo', + {'build': '.', 'volumes': ['../otherproject:/data']}, + working_dir='c:\\Users\\me\\myproject') self.assertEqual(d['volumes'], ['c:\\Users\\me\\otherproject:/data']) @mock.patch.dict(os.environ) @@ -1974,7 +2063,9 @@ class 
EnvTest(unittest.TestCase): }, } self.assertEqual( - resolve_environment(service_dict), + resolve_environment( + service_dict, Environment.from_env_file(None) + ), {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': None}, ) @@ -2011,7 +2102,10 @@ class EnvTest(unittest.TestCase): os.environ['FILE_DEF_EMPTY'] = 'E2' os.environ['ENV_DEF'] = 'E3' self.assertEqual( - resolve_environment({'env_file': ['tests/fixtures/env/resolve.env']}), + resolve_environment( + {'env_file': ['tests/fixtures/env/resolve.env']}, + Environment.from_env_file(None) + ), { 'FILE_DEF': u'bär', 'FILE_DEF_EMPTY': '', @@ -2034,7 +2128,7 @@ class EnvTest(unittest.TestCase): } } self.assertEqual( - resolve_build_args(build), + resolve_build_args(build, Environment.from_env_file(build['context'])), {'arg1': 'value1', 'empty_arg': '', 'env_arg': 'value2', 'no_env': None}, ) @@ -2066,7 +2160,9 @@ class EnvTest(unittest.TestCase): def load_from_filename(filename): - return config.load(config.find('.', [filename])).services + return config.load( + config.find('.', [filename], Environment.from_env_file('.')) + ).services class ExtendsTest(unittest.TestCase): @@ -2269,7 +2365,7 @@ class ExtendsTest(unittest.TestCase): with pytest.raises(ConfigurationError) as exc: load_from_filename('tests/fixtures/extends/service-with-invalid-schema.yml') assert ( - "myweb has neither an image nor a build path specified" in + "myweb has neither an image nor a build context specified" in exc.exconly() ) @@ -2398,6 +2494,7 @@ class ExtendsTest(unittest.TestCase): }, ])) + @mock.patch.dict(os.environ) def test_extends_with_environment_and_env_files(self): tmpdir = py.test.ensuretemp('test_extends_with_environment') self.addCleanup(tmpdir.remove) @@ -2453,12 +2550,12 @@ class ExtendsTest(unittest.TestCase): }, }, ] - with mock.patch.dict(os.environ): - os.environ['SECRET'] = 'secret' - os.environ['THING'] = 'thing' - os.environ['COMMON_ENV_FILE'] = 'secret' - os.environ['TOP_ENV_FILE'] = 'secret' - config = load_from_filename(str(tmpdir.join('docker-compose.yml'))) + + os.environ['SECRET'] = 'secret' + os.environ['THING'] = 'thing' + os.environ['COMMON_ENV_FILE'] = 'secret' + os.environ['TOP_ENV_FILE'] = 'secret' + config = load_from_filename(str(tmpdir.join('docker-compose.yml'))) assert config == expected @@ -2553,14 +2650,11 @@ class VolumePathTest(unittest.TestCase): @pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive') def test_split_path_mapping_with_windows_path(self): - windows_volume_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config:/opt/connect/config:ro" - expected_mapping = ( - "/opt/connect/config:ro", - "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config" - ) + host_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config" + windows_volume_path = host_path + ":/opt/connect/config:ro" + expected_mapping = ("/opt/connect/config:ro", host_path) mapping = config.split_path_mapping(windows_volume_path) - self.assertEqual(mapping, expected_mapping) diff --git a/tests/unit/config/interpolation_test.py b/tests/unit/config/interpolation_test.py index 0691e8865..42b5db6e9 100644 --- a/tests/unit/config/interpolation_test.py +++ b/tests/unit/config/interpolation_test.py @@ -6,6 +6,7 @@ import os import mock import pytest +from compose.config.environment import Environment from compose.config.interpolation import interpolate_environment_variables @@ -19,7 +20,7 @@ def mock_env(): def test_interpolate_environment_variables_in_services(mock_env): services = { - 'servivea': { + 
'servicea': { 'image': 'example:${USER}', 'volumes': ['$FOO:/target'], 'logging': { @@ -31,7 +32,7 @@ def test_interpolate_environment_variables_in_services(mock_env): } } expected = { - 'servivea': { + 'servicea': { 'image': 'example:jenny', 'volumes': ['bar:/target'], 'logging': { @@ -42,7 +43,9 @@ def test_interpolate_environment_variables_in_services(mock_env): } } } - assert interpolate_environment_variables(services, 'service') == expected + assert interpolate_environment_variables( + services, 'service', Environment.from_env_file(None) + ) == expected def test_interpolate_environment_variables_in_volumes(mock_env): @@ -66,4 +69,6 @@ def test_interpolate_environment_variables_in_volumes(mock_env): }, 'other': {}, } - assert interpolate_environment_variables(volumes, 'volume') == expected + assert interpolate_environment_variables( + volumes, 'volume', Environment.from_env_file(None) + ) == expected diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py index 189b0c992..47f60de8f 100644 --- a/tests/unit/container_test.py +++ b/tests/unit/container_test.py @@ -156,7 +156,9 @@ class GetContainerNameTestCase(unittest.TestCase): def test_get_container_name(self): self.assertIsNone(get_container_name({})) self.assertEqual(get_container_name({'Name': 'myproject_db_1'}), 'myproject_db_1') - self.assertEqual(get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}), 'myproject_db_1') + self.assertEqual( + get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}), + 'myproject_db_1') self.assertEqual( get_container_name({ 'Names': [ diff --git a/tests/unit/interpolation_test.py b/tests/unit/interpolation_test.py index 317982a9b..c3050c2ca 100644 --- a/tests/unit/interpolation_test.py +++ b/tests/unit/interpolation_test.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import unittest -from compose.config.interpolation import BlankDefaultDict as bddict +from compose.config.environment import Environment as bddict from compose.config.interpolation import interpolate from compose.config.interpolation import InvalidInterpolation diff --git a/tests/unit/multiplexer_test.py b/tests/unit/multiplexer_test.py deleted file mode 100644 index 737ba25d6..000000000 --- a/tests/unit/multiplexer_test.py +++ /dev/null @@ -1,61 +0,0 @@ -from __future__ import absolute_import -from __future__ import unicode_literals - -import unittest -from time import sleep - -from compose.cli.multiplexer import Multiplexer - - -class MultiplexerTest(unittest.TestCase): - def test_no_iterators(self): - mux = Multiplexer([]) - self.assertEqual([], list(mux.loop())) - - def test_empty_iterators(self): - mux = Multiplexer([ - (x for x in []), - (x for x in []), - ]) - - self.assertEqual([], list(mux.loop())) - - def test_aggregates_output(self): - mux = Multiplexer([ - (x for x in [0, 2, 4]), - (x for x in [1, 3, 5]), - ]) - - self.assertEqual( - [0, 1, 2, 3, 4, 5], - sorted(list(mux.loop())), - ) - - def test_exception(self): - class Problem(Exception): - pass - - def problematic_iterator(): - yield 0 - yield 2 - raise Problem(":(") - - mux = Multiplexer([ - problematic_iterator(), - (x for x in [1, 3, 5]), - ]) - - with self.assertRaises(Problem): - list(mux.loop()) - - def test_cascade_stop(self): - def fast_stream(): - for num in range(3): - yield "stream1 %s" % num - - def slow_stream(): - sleep(5) - yield "stream2 FAIL" - - mux = Multiplexer([fast_stream(), slow_stream()], cascade_stop=True) - assert "stream2 FAIL" not in set(mux.loop()) diff --git 
a/tests/unit/parallel_test.py b/tests/unit/parallel_test.py new file mode 100644 index 000000000..45b0db1db --- /dev/null +++ b/tests/unit/parallel_test.py @@ -0,0 +1,90 @@ +from __future__ import absolute_import +from __future__ import unicode_literals + +import six +from docker.errors import APIError + +from compose.parallel import parallel_execute +from compose.parallel import parallel_execute_iter +from compose.parallel import UpstreamError + + +web = 'web' +db = 'db' +data_volume = 'data_volume' +cache = 'cache' + +objects = [web, db, data_volume, cache] + +deps = { + web: [db, cache], + db: [data_volume], + data_volume: [], + cache: [], +} + + +def get_deps(obj): + return deps[obj] + + +def test_parallel_execute(): + results = parallel_execute( + objects=[1, 2, 3, 4, 5], + func=lambda x: x * 2, + get_name=six.text_type, + msg="Doubling", + ) + + assert sorted(results) == [2, 4, 6, 8, 10] + + +def test_parallel_execute_with_deps(): + log = [] + + def process(x): + log.append(x) + + parallel_execute( + objects=objects, + func=process, + get_name=lambda obj: obj, + msg="Processing", + get_deps=get_deps, + ) + + assert sorted(log) == sorted(objects) + + assert log.index(data_volume) < log.index(db) + assert log.index(db) < log.index(web) + assert log.index(cache) < log.index(web) + + +def test_parallel_execute_with_upstream_errors(): + log = [] + + def process(x): + if x is data_volume: + raise APIError(None, None, "Something went wrong") + log.append(x) + + parallel_execute( + objects=objects, + func=process, + get_name=lambda obj: obj, + msg="Processing", + get_deps=get_deps, + ) + + assert log == [cache] + + events = [ + (obj, result, type(exception)) + for obj, result, exception + in parallel_execute_iter(objects, process, get_deps) + ] + + assert (cache, None, type(None)) in events + assert (data_volume, None, APIError) in events + assert (db, None, UpstreamError) in events + assert (web, None, UpstreamError) in events diff --git a/tests/unit/project_test.py b/tests/unit/project_test.py index c28c21523..b6a52e08d 100644 --- a/tests/unit/project_test.py +++ b/tests/unit/project_test.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals import datetime import docker +from docker.errors import NotFound from .. import mock from .. 
import unittest @@ -12,6 +13,7 @@ from compose.config.types import VolumeFromSpec from compose.const import LABEL_SERVICE from compose.container import Container from compose.project import Project +from compose.service import ImageType from compose.service import Service @@ -268,12 +270,21 @@ class ProjectTest(unittest.TestCase): 'time': 1420092061, 'timeNano': 14200920610000004000, }, + { + 'status': 'destroy', + 'from': 'example/db', + 'id': 'eeeee', + 'time': 1420092061, + 'timeNano': 14200920610000004000, + }, ]) def dt_with_microseconds(dt, us): return datetime.datetime.fromtimestamp(dt).replace(microsecond=us) def get_container(cid): + if cid == 'eeeee': + raise NotFound(None, None, "oops") if cid == 'abcde': name = 'web' labels = {LABEL_SERVICE: name} @@ -307,6 +318,7 @@ class ProjectTest(unittest.TestCase): 'image': 'example/image', }, 'time': dt_with_microseconds(1420092061, 2), + 'container': Container(None, {'Id': 'abcde'}), }, { 'type': 'container', @@ -318,6 +330,7 @@ class ProjectTest(unittest.TestCase): 'image': 'example/image', }, 'time': dt_with_microseconds(1420092061, 3), + 'container': Container(None, {'Id': 'abcde'}), }, { 'type': 'container', @@ -329,6 +342,7 @@ class ProjectTest(unittest.TestCase): 'image': 'example/db', }, 'time': dt_with_microseconds(1420092061, 4), + 'container': Container(None, {'Id': 'ababa'}), }, ] @@ -476,3 +490,23 @@ class ProjectTest(unittest.TestCase): ), ) self.assertEqual([c.id for c in project.containers()], ['1']) + + def test_down_with_no_resources(self): + project = Project.from_config( + name='test', + client=self.mock_client, + config_data=Config( + version='2', + services=[{ + 'name': 'web', + 'image': 'busybox:latest', + }], + networks={'default': {}}, + volumes={'data': {}}, + ), + ) + self.mock_client.remove_network.side_effect = NotFound(None, None, 'oops') + self.mock_client.remove_volume.side_effect = NotFound(None, None, 'oops') + + project.down(ImageType.all, True) + self.mock_client.remove_image.assert_called_once_with("busybox:latest") diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py index 321ebad05..fe3794daf 100644 --- a/tests/unit/service_test.py +++ b/tests/unit/service_test.py @@ -2,6 +2,7 @@ from __future__ import absolute_import from __future__ import unicode_literals import docker +import pytest from docker.errors import APIError from .. 
import mock @@ -13,8 +14,10 @@ from compose.const import LABEL_ONE_OFF from compose.const import LABEL_PROJECT from compose.const import LABEL_SERVICE from compose.container import Container +from compose.project import OneOffFilter from compose.service import build_ulimits from compose.service import build_volume_binding +from compose.service import BuildAction from compose.service import ContainerNetworkMode from compose.service import get_container_data_volumes from compose.service import ImageType @@ -146,7 +149,13 @@ class ServiceTest(unittest.TestCase): def test_memory_swap_limit(self): self.mock_client.create_host_config.return_value = {} - service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, mem_limit=1000000000, memswap_limit=2000000000) + service = Service( + name='foo', + image='foo', + hostname='name', + client=self.mock_client, + mem_limit=1000000000, + memswap_limit=2000000000) service._get_container_create_options({'some': 'overrides'}, 1) self.assertTrue(self.mock_client.create_host_config.called) @@ -162,7 +171,12 @@ class ServiceTest(unittest.TestCase): def test_cgroup_parent(self): self.mock_client.create_host_config.return_value = {} - service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, cgroup_parent='test') + service = Service( + name='foo', + image='foo', + hostname='name', + client=self.mock_client, + cgroup_parent='test') service._get_container_create_options({'some': 'overrides'}, 1) self.assertTrue(self.mock_client.create_host_config.called) @@ -176,7 +190,13 @@ class ServiceTest(unittest.TestCase): log_opt = {'syslog-address': 'tcp://192.168.0.42:123'} logging = {'driver': 'syslog', 'options': log_opt} - service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, logging=logging) + service = Service( + name='foo', + image='foo', + hostname='name', + client=self.mock_client, + log_driver='syslog', + logging=logging) service._get_container_create_options({'some': 'overrides'}, 1) self.assertTrue(self.mock_client.create_host_config.called) @@ -237,7 +257,7 @@ class ServiceTest(unittest.TestCase): opts = service._get_container_create_options( {'name': name}, 1, - one_off=True) + one_off=OneOffFilter.only) self.assertEqual(opts['name'], name) def test_get_container_create_options_does_not_mutate_options(self): @@ -266,7 +286,7 @@ class ServiceTest(unittest.TestCase): self.assertEqual( opts['labels'][LABEL_CONFIG_HASH], - 'f8bfa1058ad1f4231372a0b1639f0dfdb574dafff4e8d7938049ae993f7cf1fc') + '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa') assert opts['environment'] == ['also=real'] def test_get_container_create_options_sets_affinity_with_binds(self): @@ -387,13 +407,20 @@ class ServiceTest(unittest.TestCase): self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "", ":")) self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag", ":")) self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "", ":")) - self.assertEqual(parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag", ":")) + self.assertEqual( + parse_repository_tag("url:5000/repo:tag"), + ("url:5000/repo", "tag", ":")) + self.assertEqual( + parse_repository_tag("root@sha256:digest"), + ("root", "sha256:digest", "@")) + self.assertEqual( + parse_repository_tag("user/repo@sha256:digest"), + ("user/repo", "sha256:digest", "@")) + self.assertEqual( + parse_repository_tag("url:5000/repo@sha256:digest"), + ("url:5000/repo", "sha256:digest", "@")) - 
self.assertEqual(parse_repository_tag("root@sha256:digest"), ("root", "sha256:digest", "@")) - self.assertEqual(parse_repository_tag("user/repo@sha256:digest"), ("user/repo", "sha256:digest", "@")) - self.assertEqual(parse_repository_tag("url:5000/repo@sha256:digest"), ("url:5000/repo", "sha256:digest", "@")) - - def test_create_container_with_build(self): + def test_create_container(self): service = Service('foo', client=self.mock_client, build={'context': '.'}) self.mock_client.inspect_image.side_effect = [ NoSuchImageError, @@ -403,7 +430,12 @@ class ServiceTest(unittest.TestCase): '{"stream": "Successfully built abcd"}', ] - service.create_container(do_build=True) + with mock.patch('compose.service.log', autospec=True) as mock_log: + service.create_container() + assert mock_log.warn.called + _, args, _ = mock_log.warn.mock_calls[0] + assert 'was built because it did not already exist' in args[0] + self.mock_client.build.assert_called_once_with( tag='default_foo', dockerfile=None, @@ -416,18 +448,41 @@ class ServiceTest(unittest.TestCase): buildargs=None, ) - def test_create_container_no_build(self): + def test_ensure_image_exists_no_build(self): service = Service('foo', client=self.mock_client, build={'context': '.'}) self.mock_client.inspect_image.return_value = {'Id': 'abc123'} - service.create_container(do_build=False) - self.assertFalse(self.mock_client.build.called) + service.ensure_image_exists(do_build=BuildAction.skip) + assert not self.mock_client.build.called - def test_create_container_no_build_but_needs_build(self): + def test_ensure_image_exists_no_build_but_needs_build(self): service = Service('foo', client=self.mock_client, build={'context': '.'}) self.mock_client.inspect_image.side_effect = NoSuchImageError - with self.assertRaises(NeedsBuildError): - service.create_container(do_build=False) + with pytest.raises(NeedsBuildError): + service.ensure_image_exists(do_build=BuildAction.skip) + + def test_ensure_image_exists_force_build(self): + service = Service('foo', client=self.mock_client, build={'context': '.'}) + self.mock_client.inspect_image.return_value = {'Id': 'abc123'} + self.mock_client.build.return_value = [ + '{"stream": "Successfully built abcd"}', + ] + + with mock.patch('compose.service.log', autospec=True) as mock_log: + service.ensure_image_exists(do_build=BuildAction.force) + + assert not mock_log.warn.called + self.mock_client.build.assert_called_once_with( + tag='default_foo', + dockerfile=None, + stream=True, + path='.', + pull=False, + forcerm=False, + nocache=False, + rm=True, + buildargs=None, + ) def test_build_does_not_pull(self): self.mock_client.build.return_value = [ @@ -447,6 +502,7 @@ class ServiceTest(unittest.TestCase): image='example.com/foo', client=self.mock_client, network_mode=ServiceNetworkMode(Service('other')), + networks={'default': None}, links=[(Service('one'), 'one')], volumes_from=[VolumeFromSpec(Service('two'), 'rw', 'service')]) @@ -456,7 +512,7 @@ class ServiceTest(unittest.TestCase): 'options': {'image': 'example.com/foo'}, 'links': [('one', 'one')], 'net': 'other', - 'networks': [], + 'networks': {'default': None}, 'volumes_from': [('two', 'rw')], } assert config_dict == expected @@ -477,7 +533,7 @@ class ServiceTest(unittest.TestCase): 'image_id': 'abcd', 'options': {'image': 'example.com/foo'}, 'links': [], - 'networks': [], + 'networks': {}, 'net': 'aaabbb', 'volumes_from': [], } diff --git a/tox.ini b/tox.ini index dc85bc6da..61bc05745 100644 --- a/tox.ini +++ b/tox.ini @@ -15,7 +15,7 @@ deps = -rrequirements.txt 
-rrequirements-dev.txt commands = - py.test -v -rxs \ + py.test -v \ --cov=compose \ --cov-report html \ --cov-report term \ @@ -42,8 +42,10 @@ directory = coverage-html # end coverage configuration [flake8] -# Allow really long lines for now -max-line-length = 140 +max-line-length = 105 # Set this high for now -max-complexity = 12 +max-complexity = 11 exclude = compose/packages + +[pytest] +addopts = --tb=short -rxs
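
A note on the new `tests/unit/parallel_test.py` added above: it pins down the contract of `compose.parallel.parallel_execute`, which runs a function over a set of objects concurrently, honours a dependency graph supplied via `get_deps`, and marks dependents of a failed object with `UpstreamError` instead of running them. The sketch below illustrates that scheduling idea under stated assumptions: `parallel_execute_sketch` is a hypothetical name, it assumes an acyclic dependency graph, and it is not Compose's implementation (the real one lives in `compose/parallel.py` and also streams progress output; this sketch uses the py3 stdlib `queue` where the repo itself goes through `six.moves.queue`).

    import threading
    from queue import Queue

    def parallel_execute_sketch(objects, func, get_deps):
        # Toy dependency-aware parallel executor. Assumes get_deps() describes
        # an acyclic graph over `objects`. Returns {obj: (result, error)};
        # dependents of a failed object are never run and get the sentinel
        # error 'upstream' instead.
        done = Queue()
        state = {}       # obj -> (result, error), filled in as objects finish
        started = set()  # objects whose worker thread is currently running

        def worker(obj):
            try:
                done.put((obj, func(obj), None))
            except Exception as exc:  # report failures, keep other workers alive
                done.put((obj, None, exc))

        def dep_failed(dep):
            return dep in state and state[dep][1] is not None

        def schedule_ready():
            for obj in objects:
                if obj in started or obj in state:
                    continue
                deps = get_deps(obj)
                if any(dep_failed(dep) for dep in deps):
                    state[obj] = (None, 'upstream')      # skip: upstream failure
                elif all(dep in state for dep in deps):  # all deps succeeded
                    started.add(obj)
                    threading.Thread(target=worker, args=(obj,)).start()

        schedule_ready()
        while len(state) < len(objects):
            if started - set(state):  # workers outstanding: block until one finishes
                obj, result, error = done.get()
                state[obj] = (result, error)
            schedule_ready()
        return state

With the `web`/`db`/`data_volume`/`cache` graph from the test above and a `func` that raises for `data_volume`, the sketch leaves `db` and `web` marked `'upstream'` while `cache` still completes, which is the same failure-containment behaviour `test_parallel_execute_with_upstream_errors` asserts.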