diff --git a/.circleci/config.yml b/.circleci/config.yml
index f4e90d6de..08f8c42c3 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -10,7 +10,7 @@ jobs:
           command: ./script/setup/osx
       - run:
           name: install tox
-          command: sudo pip install --upgrade tox==2.1.1
+          command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
       - run:
           name: unit tests
           command: tox -e py27,py36,py37 -- tests/unit
@@ -22,7 +22,7 @@
       - checkout
       - run:
           name: upgrade python tools
-          command: sudo pip install --upgrade pip virtualenv
+          command: sudo pip install --upgrade pip virtualenv==16.2.0
       - run:
           name: setup script
           command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5eb1bb4d..440104fa6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,7 @@
 Change log
 ==========

-1.24.0 (2019-01-25)
+1.24.0 (2019-03-22)
 -------------------

 ### Features
@@ -11,6 +11,12 @@ Change log
 - Added a `--all` flag to `docker-compose ps` to include stopped one-off containers
   in the command's output.

+- Add bash completion for `ps --all|-a`
+
+- Support for credential_spec
+
+- Add `--parallel` to `docker build`'s options in `bash` and `zsh` completion
+
 ### Bugfixes

 - Fixed a bug where some valid credential helpers weren't properly handled by Compose
@@ -37,6 +43,16 @@ Change log
 - Missing images will no longer stop the execution of `docker-compose down` commands
   (a warning will be displayed instead).

+- Force `virtualenv` version for macOS CI
+
+- Fix merging of compose files when network has `None` config
+
+- Fix `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`
+
+- Bump `docker-py` version to `3.7.1` to fix SSH issues
+
+- Fix release script and some typos on release documentation
+
 1.23.2 (2018-11-28)
 -------------------

diff --git a/Dockerfile b/Dockerfile
index a14be492e..c5e7c815a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -17,6 +17,8 @@ ENV LANG en_US.UTF-8
 RUN useradd -d /home/user -m -s /bin/bash user
 WORKDIR /code/

+# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
+RUN pip install virtualenv==16.2.0
 RUN pip install tox==2.1.1

 ADD requirements.txt /code/
@@ -25,6 +27,7 @@ ADD .pre-commit-config.yaml /code/
 ADD setup.py /code/
 ADD tox.ini /code/
 ADD compose /code/compose/
+ADD README.md /code/
 RUN tox --notest

 ADD . /code/
diff --git a/compose/__init__.py b/compose/__init__.py
index bc5e6b116..5fd7d326b 100644
--- a/compose/__init__.py
+++ b/compose/__init__.py
@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals

-__version__ = '1.24.0-rc1'
+__version__ = '1.24.0-rc3'
diff --git a/compose/cli/main.py b/compose/cli/main.py
index 950e5055d..789601792 100644
--- a/compose/cli/main.py
+++ b/compose/cli/main.py
@@ -206,8 +206,8 @@ class TopLevelCommand(object):
                                   name specified in the client certificate
       --project-directory PATH    Specify an alternate working directory
                                   (default: the path of the Compose file)
-      --compatibility             If set, Compose will attempt to convert deploy
-                                  keys in v3 files to their non-Swarm equivalent
+      --compatibility             If set, Compose will attempt to convert keys
+                                  in v3 files to their non-Swarm equivalent

     Commands:
       build              Build or rebuild services
diff --git a/compose/config/config.py b/compose/config/config.py
index 3df211f73..f3142d80a 100644
--- a/compose/config/config.py
+++ b/compose/config/config.py
@@ -51,6 +51,7 @@ from .validation import match_named_volumes
 from .validation import validate_against_config_schema
 from .validation import validate_config_section
 from .validation import validate_cpu
+from .validation import validate_credential_spec
 from .validation import validate_depends_on
 from .validation import validate_extends_file_path
 from .validation import validate_healthcheck
@@ -369,7 +370,6 @@ def check_swarm_only_config(service_dicts, compatibility=False):
         )
     if not compatibility:
         check_swarm_only_key(service_dicts, 'deploy')
-        check_swarm_only_key(service_dicts, 'credential_spec')
         check_swarm_only_key(service_dicts, 'configs')


@@ -706,6 +706,7 @@ def validate_service(service_config, service_names, config_file):
     validate_depends_on(service_config, service_names)
     validate_links(service_config, service_names)
     validate_healthcheck(service_config)
+    validate_credential_spec(service_config)

     if not service_dict.get('image') and has_uppercase(service_name):
         raise ConfigurationError(
@@ -894,6 +895,7 @@ def finalize_service(service_config, service_names, version, environment, compat
     normalize_build(service_dict, service_config.working_dir, environment)

     if compatibility:
+        service_dict = translate_credential_spec_to_security_opt(service_dict)
         service_dict, ignored_keys = translate_deploy_keys_to_container_config(
             service_dict
         )
@@ -930,6 +932,25 @@ def convert_restart_policy(name):
         raise ConfigurationError('Invalid restart policy "{}"'.format(name))


+def convert_credential_spec_to_security_opt(credential_spec):
+    if 'file' in credential_spec:
+        return 'file://{file}'.format(file=credential_spec['file'])
+    return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+    result = []
+
+    if 'credential_spec' in service_dict:
+        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+        result.append('credentialspec={spec}'.format(spec=spec))
+
+    if result:
+        service_dict['security_opt'] = result
+
+    return service_dict
+
+
 def translate_deploy_keys_to_container_config(service_dict):
     if 'credential_spec' in service_dict:
         del service_dict['credential_spec']
@@ -1172,7 +1193,7 @@ def merge_networks(base, override):
     base = {k: {} for k in base} if isinstance(base, list) else base
     override = {k: {} for k in override} if isinstance(override, list) else override
     for network_name in all_network_names:
-        md = MergeDict(base.get(network_name, {}), override.get(network_name, {}))
+        md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
         md.merge_field('aliases', merge_unique_items_lists, [])
         md.merge_field('link_local_ips', merge_unique_items_lists, [])
         md.merge_scalar('priority')
diff --git a/compose/config/validation.py b/compose/config/validation.py
index 039569551..1cceb71f0 100644
--- a/compose/config/validation.py
+++ b/compose/config/validation.py
@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
         )


+def validate_credential_spec(service_config):
+    credential_spec = service_config.config.get('credential_spec')
+    if not credential_spec:
+        return
+
+    if 'registry' not in credential_spec and 'file' not in credential_spec:
+        raise ConfigurationError(
+            "Service '{s.name}' is missing 'credential_spec.file' or "
+            "'credential_spec.registry'".format(s=service_config)
+        )
+
+
 def get_unsupported_config_msg(path, error_key):
     msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
     if error_key in DOCKER_CONFIG_HINTS:
diff --git a/compose/service.py b/compose/service.py
index 3c5e356a3..8c6702f1e 100644
--- a/compose/service.py
+++ b/compose/service.py
@@ -291,7 +291,7 @@ class Service(object):
             c for c in stopped_containers if self._containers_have_diverged([c])
         ]
         for c in divergent_containers:
-                c.remove()
+            c.remove()

         all_containers = list(set(all_containers) - set(divergent_containers))

@@ -461,50 +461,50 @@ def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                       renew_anonymous_volumes):
-            if scale is not None and len(containers) > scale:
-                self._downscale(containers[scale:], timeout)
-                containers = containers[:scale]
+        if scale is not None and len(containers) > scale:
+            self._downscale(containers[scale:], timeout)
+            containers = containers[:scale]

-            def recreate(container):
-                return self.recreate_container(
-                    container, timeout=timeout, attach_logs=not detached,
-                    start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
-                )
-            containers, errors = parallel_execute(
-                containers,
-                recreate,
-                lambda c: c.name,
-                "Recreating",
+        def recreate(container):
+            return self.recreate_container(
+                container, timeout=timeout, attach_logs=not detached,
+                start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
             )
+        containers, errors = parallel_execute(
+            containers,
+            recreate,
+            lambda c: c.name,
+            "Recreating",
+        )
+        for error in errors.values():
+            raise OperationFailedError(error)
+
+        if scale is not None and len(containers) < scale:
+            containers.extend(self._execute_convergence_create(
+                scale - len(containers), detached, start
+            ))
+        return containers
+
+    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
+        if scale is not None and len(containers) > scale:
+            self._downscale(containers[scale:], timeout)
+            containers = containers[:scale]
+        if start:
+            _, errors = parallel_execute(
+                containers,
+                lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
+                lambda c: c.name,
+                "Starting",
+            )
+
             for error in errors.values():
                 raise OperationFailedError(error)

-            if scale is not None and len(containers) < scale:
-                containers.extend(self._execute_convergence_create(
-                    scale - len(containers), detached, start
-                ))
-            return containers
-
-        def _execute_convergence_start(self, containers, scale, timeout, detached, start):
-            if scale is not None and len(containers) > scale:
-                self._downscale(containers[scale:], timeout)
-                containers = containers[:scale]
-            if start:
-                _, errors = parallel_execute(
-                    containers,
-                    lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
-                    lambda c: c.name,
-                    "Starting",
-                )
-
-                for error in errors.values():
-                    raise OperationFailedError(error)
-
-            if scale is not None and len(containers) < scale:
-                containers.extend(self._execute_convergence_create(
-                    scale - len(containers), detached, start
-                ))
-            return containers
+        if scale is not None and len(containers) < scale:
+            containers.extend(self._execute_convergence_create(
+                scale - len(containers), detached, start
+            ))
+        return containers

     def _downscale(self, containers, timeout=None):
         def stop_and_remove(container):
diff --git a/contrib/completion/bash/docker-compose b/contrib/completion/bash/docker-compose
index 395888d34..2add0c9cd 100644
--- a/contrib/completion/bash/docker-compose
+++ b/contrib/completion/bash/docker-compose
@@ -114,7 +114,7 @@ _docker_compose_build() {

 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull --parallel" -- "$cur" ) )
 			;;
 		*)
 			__docker_compose_complete_services --filter source=build
@@ -361,7 +361,7 @@ _docker_compose_ps() {

 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) )
 			;;
 		*)
 			__docker_compose_complete_services
diff --git a/contrib/completion/zsh/_docker-compose b/contrib/completion/zsh/_docker-compose
index e55c91964..d25256c14 100755
--- a/contrib/completion/zsh/_docker-compose
+++ b/contrib/completion/zsh/_docker-compose
@@ -117,6 +117,7 @@ __docker-compose_subcommand() {
                 '--no-cache[Do not use cache when building the image.]' \
                 '--pull[Always attempt to pull a newer version of the image.]' \
                 '--compress[Compress the build context using gzip.]' \
+                '--parallel[Build images in parallel.]' \
                 '*:services:__docker-compose_services_from_build' && ret=0
             ;;
         (bundle)
@@ -339,7 +340,7 @@ _docker-compose() {
         '(- :)'{-h,--help}'[Get help]' \
         '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
         '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-        "--compatibility[If set, Compose will attempt to convert deploy keys in v3 files to their non-Swarm equivalent]" \
+        "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
         '(- :)'{-v,--version}'[Print version and exit]' \
         '--verbose[Show more output]' \
         '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \
diff --git a/docker-compose.spec b/docker-compose.spec
index 70db5c29e..5ca1e4c24 100644
--- a/docker-compose.spec
+++ b/docker-compose.spec
@@ -98,4 +98,5 @@ exe = EXE(pyz,
           debug=False,
           strip=None,
           upx=True,
-          console=True)
+          console=True,
+          bootloader_ignore_signals=True)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 4d74f6d15..bfb941152 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,5 +1,5 @@
 coverage==4.4.2
 flake8==3.5.0
-mock>=1.0.1
+mock==2.0.0
 pytest==3.6.3
 pytest-cov==2.5.1
diff --git a/requirements.txt b/requirements.txt
index 9333ec60d..2487f92c0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ cached-property==1.3.0
 certifi==2017.4.17
 chardet==3.0.4
 colorama==0.4.0; sys_platform == 'win32'
-docker==3.7.0
+docker==3.7.1
 docker-pycreds==0.4.0
 dockerpty==0.4.1
 docopt==0.6.2
diff --git a/script/build/linux b/script/build/linux
index 1a4cd4d9b..056940ad0 100755
--- a/script/build/linux
+++ b/script/build/linux
@@ -5,7 +5,7 @@ set -ex
 ./script/clean

 TAG="docker-compose"
-docker build -t "$TAG" . | tail -n 200
+docker build -t "$TAG" .
 docker run \
     --rm --entrypoint="script/build/linux-entrypoint" \
     -v $(pwd)/dist:/code/dist \
diff --git a/script/release/README.md b/script/release/README.md
index f7f911e53..0c6f12cbe 100644
--- a/script/release/README.md
+++ b/script/release/README.md
@@ -40,7 +40,7 @@ This API token should be exposed to the release script through the
 ### A Bintray account and Bintray API key

 Your Bintray account will need to be an admin member of the
-[docker-compose organization](https://github.com/settings/tokens).
+[docker-compose organization](https://bintray.com/docker-compose).
 Additionally, you should generate a personal API key. To do so, click your
 username in the top-right hand corner and select "Edit profile" ; on the new
 page, select "API key" in the left-side menu.
@@ -129,7 +129,7 @@ assets public), proceed to the "Finalize a release" section of this guide.
 Once you're ready to make your release public, you may execute the following
 command from the root of the Compose repository:
 ```
-./script/release/release.sh -b finalize RELEAE_VERSION
+./script/release/release.sh -b finalize RELEASE_VERSION
 ```

 Note that this command will create and publish versioned assets to the public.
diff --git a/script/release/release.py b/script/release/release.py
index 63bf863df..9db1a49d9 100755
--- a/script/release/release.py
+++ b/script/release/release.py
@@ -7,7 +7,6 @@ import os
 import shutil
 import sys
 import time
-from distutils.core import run_setup

 from jinja2 import Template
 from release.bintray import BintrayAPI
@@ -276,7 +275,8 @@ def finalize(args):

     repository.checkout_branch(br_name)

-    run_setup(os.path.join(REPO_ROOT, 'setup.py'), script_args=['sdist', 'bdist_wheel'])
+    os.system('python {setup_script} sdist bdist_wheel'.format(
+        setup_script=os.path.join(REPO_ROOT, 'setup.py')))

     merge_status = pr_data.merge()
     if not merge_status.merged and not args.finalize_resume:
diff --git a/script/run/run.sh b/script/run/run.sh
index df3f2298f..edc2e0c0e 100755
--- a/script/run/run.sh
+++ b/script/run/run.sh
@@ -15,7 +15,7 @@

 set -e

-VERSION="1.24.0-rc1"
+VERSION="1.24.0-rc3"
 IMAGE="docker/compose:$VERSION"

diff --git a/script/setup/osx b/script/setup/osx
index 08491b6e5..1b546816d 100755
--- a/script/setup/osx
+++ b/script/setup/osx
@@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
     SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
 fi

-OPENSSL_VERSION=1.1.0h
+OPENSSL_VERSION=1.1.0j
 OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
-OPENSSL_SHA1=0fc39f6aa91b6e7f4d05018f7c5e991e1d2491fd
+OPENSSL_SHA1=dcad1efbacd9a4ed67d4514470af12bbe2a1d60a

-PYTHON_VERSION=3.6.6
+PYTHON_VERSION=3.6.8
 PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
-PYTHON_SHA1=ae1fc9ddd29ad8c1d5f7b0d799ff0787efeb9652
+PYTHON_SHA1=09fcc4edaef0915b4dedbfb462f1cd15f82d3a6f

 #
 # Install prerequisites.
diff --git a/tests/unit/cli/log_printer_test.py b/tests/unit/cli/log_printer_test.py
index d0c4b56be..6db24e464 100644
--- a/tests/unit/cli/log_printer_test.py
+++ b/tests/unit/cli/log_printer_test.py
@@ -193,7 +193,7 @@ class TestConsumeQueue(object):
             queue.put(item)

         generator = consume_queue(queue, True)
-        assert next(generator) is 'foobar-1'
+        assert next(generator) == 'foobar-1'

     def test_item_is_none_when_timeout_is_hit(self):
         queue = Queue()
diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py
index 8baf8e4ee..50d8e13a8 100644
--- a/tests/unit/config/config_test.py
+++ b/tests/unit/config/config_test.py
@@ -3593,6 +3593,9 @@ class InterpolationTest(unittest.TestCase):
                         'reservations': {'memory': '100M'},
                     },
                 },
+                'credential_spec': {
+                    'file': 'spec.json'
+                },
             },
         },
     })
@@ -3610,7 +3613,8 @@ class InterpolationTest(unittest.TestCase):
            'mem_limit': '300M',
            'mem_reservation': '100M',
            'cpus': 0.7,
-           'name': 'foo'
+           'name': 'foo',
+           'security_opt': ['credentialspec=file://spec.json'],
        }

    @mock.patch.dict(os.environ)
@@ -3928,6 +3932,24 @@ class MergeNetworksTest(unittest.TestCase, MergeListsTest):
            }
        }

+    def test_network_has_none_value(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: {
+                'default': None
+            }},
+            {self.config_name: {
+                'default': {
+                    'aliases': []
+                }
+            }},
+            DEFAULT_VERSION)
+
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': []
+            }
+        }
+
    def test_all_properties(self):
        service_dict = config.merge_service_dicts(
            {self.config_name: {