Merge pull request #6609 from docker/bump-1.24.0-rc3

Bump 1.24.0-rc3
Commit 3a3288c54b by Ulysses Souza, 2019-03-22 15:51:21 +01:00, committed by GitHub (GPG key ID: 4AEE18F83AFDEB23)
20 changed files with 142 additions and 66 deletions

.circleci/config.yml

@@ -10,7 +10,7 @@ jobs:
           command: ./script/setup/osx
       - run:
           name: install tox
-          command: sudo pip install --upgrade tox==2.1.1
+          command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
       - run:
           name: unit tests
           command: tox -e py27,py36,py37 -- tests/unit
@@ -22,7 +22,7 @@ jobs:
       - checkout
       - run:
          name: upgrade python tools
          command: sudo pip install --upgrade pip virtualenv==16.2.0
      - run:
          name: setup script
          command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx

CHANGELOG.md

@ -1,7 +1,7 @@
Change log Change log
========== ==========
1.24.0 (2019-01-25) 1.24.0 (2019-03-22)
------------------- -------------------
### Features ### Features
@ -11,6 +11,12 @@ Change log
- Added a `--all` flag to `docker-compose ps` to include stopped one-off containers - Added a `--all` flag to `docker-compose ps` to include stopped one-off containers
in the command's output. in the command's output.
- Add bash completion for `ps --all|-a`
- Support for credential_spec
- Add `--parallel` to `docker build`'s options in `bash` and `zsh` completion
### Bugfixes ### Bugfixes
- Fixed a bug where some valid credential helpers weren't properly handled by Compose - Fixed a bug where some valid credential helpers weren't properly handled by Compose
@ -37,6 +43,16 @@ Change log
- Missing images will no longer stop the execution of `docker-compose down` commands - Missing images will no longer stop the execution of `docker-compose down` commands
(a warning will be displayed instead). (a warning will be displayed instead).
- Force `virtualenv` version for macOS CI
- Fix merging of compose files when network has `None` config
- Fix `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`
- Bump `docker-py` version to `3.7.1` to fix SSH issues
- Fix release script and some typos on release documentation
1.23.2 (2018-11-28) 1.23.2 (2018-11-28)
------------------- -------------------

Dockerfile

@@ -17,6 +17,8 @@ ENV LANG en_US.UTF-8
 RUN useradd -d /home/user -m -s /bin/bash user
 WORKDIR /code/
 
+# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
+RUN pip install virtualenv==16.2.0
 RUN pip install tox==2.1.1
 
 ADD requirements.txt /code/
@@ -25,6 +27,7 @@ ADD .pre-commit-config.yaml /code/
 ADD setup.py /code/
 ADD tox.ini /code/
 ADD compose /code/compose/
+ADD README.md /code/
 RUN tox --notest
 
 ADD . /code/

compose/__init__.py

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.24.0-rc1'
+__version__ = '1.24.0-rc3'

compose/cli/main.py

@@ -206,8 +206,8 @@ class TopLevelCommand(object):
                                   name specified in the client certificate
       --project-directory PATH    Specify an alternate working directory
                                   (default: the path of the Compose file)
-      --compatibility             If set, Compose will attempt to convert deploy
-                                  keys in v3 files to their non-Swarm equivalent
+      --compatibility             If set, Compose will attempt to convert keys
+                                  in v3 files to their non-Swarm equivalent
 
     Commands:
       build              Build or rebuild services

compose/config/config.py

@@ -51,6 +51,7 @@ from .validation import match_named_volumes
 from .validation import validate_against_config_schema
 from .validation import validate_config_section
 from .validation import validate_cpu
+from .validation import validate_credential_spec
 from .validation import validate_depends_on
 from .validation import validate_extends_file_path
 from .validation import validate_healthcheck
@@ -369,7 +370,6 @@ def check_swarm_only_config(service_dicts, compatibility=False):
     )
     if not compatibility:
         check_swarm_only_key(service_dicts, 'deploy')
-        check_swarm_only_key(service_dicts, 'credential_spec')
         check_swarm_only_key(service_dicts, 'configs')
@@ -706,6 +706,7 @@ def validate_service(service_config, service_names, config_file):
     validate_depends_on(service_config, service_names)
     validate_links(service_config, service_names)
     validate_healthcheck(service_config)
+    validate_credential_spec(service_config)
 
     if not service_dict.get('image') and has_uppercase(service_name):
         raise ConfigurationError(
@@ -894,6 +895,7 @@ def finalize_service(service_config, service_names, version, environment, compat
     normalize_build(service_dict, service_config.working_dir, environment)
 
     if compatibility:
+        service_dict = translate_credential_spec_to_security_opt(service_dict)
         service_dict, ignored_keys = translate_deploy_keys_to_container_config(
             service_dict
         )
@@ -930,6 +932,25 @@ def convert_restart_policy(name):
         raise ConfigurationError('Invalid restart policy "{}"'.format(name))
 
 
+def convert_credential_spec_to_security_opt(credential_spec):
+    if 'file' in credential_spec:
+        return 'file://{file}'.format(file=credential_spec['file'])
+    return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+    result = []
+
+    if 'credential_spec' in service_dict:
+        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+        result.append('credentialspec={spec}'.format(spec=spec))
+
+    if result:
+        service_dict['security_opt'] = result
+
+    return service_dict
+
+
 def translate_deploy_keys_to_container_config(service_dict):
     if 'credential_spec' in service_dict:
         del service_dict['credential_spec']
@@ -1172,7 +1193,7 @@ def merge_networks(base, override):
     base = {k: {} for k in base} if isinstance(base, list) else base
     override = {k: {} for k in override} if isinstance(override, list) else override
     for network_name in all_network_names:
-        md = MergeDict(base.get(network_name, {}), override.get(network_name, {}))
+        md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
         md.merge_field('aliases', merge_unique_items_lists, [])
         md.merge_field('link_local_ips', merge_unique_items_lists, [])
         md.merge_scalar('priority')
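
Why `merge_networks` now uses `or {}` instead of a `dict.get` default: a network declared with no configuration (`networks: {default: null}`) parses to a stored `None`, and `dict.get(key, default)` only falls back when the key is *absent*, not when its value is `None`. A standalone illustration, plain Python with no Compose imports:

```python
# "networks: {default: ~}" in a compose file stores an explicit None.
base = {'default': None}

print(base.get('default', {}))    # None -- default only applies to missing keys
print(base.get('default') or {})  # {}   -- what MergeDict needs to merge safely
```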

compose/config/validation.py

@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
         )
 
 
+def validate_credential_spec(service_config):
+    credential_spec = service_config.config.get('credential_spec')
+    if not credential_spec:
+        return
+
+    if 'registry' not in credential_spec and 'file' not in credential_spec:
+        raise ConfigurationError(
+            "Service '{s.name}' is missing 'credential_spec.file' or "
+            "'credential_spec.registry'".format(s=service_config)
+        )
+
+
 def get_unsupported_config_msg(path, error_key):
     msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
     if error_key in DOCKER_CONFIG_HINTS:
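
A standalone sketch of what the new validator accepts and rejects — the `check` helper below is hypothetical; only the file-or-registry rule comes from the hunk above:

```python
def check(credential_spec):
    # Mirrors the rule above: an empty spec is ignored; a non-empty spec
    # must name either a 'file' or a 'registry'.
    if not credential_spec:
        return
    if 'registry' not in credential_spec and 'file' not in credential_spec:
        raise ValueError("credential_spec needs 'file' or 'registry'")

check({'file': 'spec.json'})         # ok
check({'registry': 'my-cred-spec'})  # ok
check(None)                          # ok: nothing to validate
# check({'keytab': 'x'})             # would raise
```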

compose/service.py

@@ -291,7 +291,7 @@ class Service(object):
                 c for c in stopped_containers if self._containers_have_diverged([c])
             ]
             for c in divergent_containers:
-                    c.remove()
+                c.remove()
 
             all_containers = list(set(all_containers) - set(divergent_containers))
@@ -461,50 +461,50 @@ class Service(object):
     def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                       renew_anonymous_volumes):
-            if scale is not None and len(containers) > scale:
-                self._downscale(containers[scale:], timeout)
-                containers = containers[:scale]
-
-            def recreate(container):
-                return self.recreate_container(
-                    container, timeout=timeout, attach_logs=not detached,
-                    start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
-                )
-            containers, errors = parallel_execute(
-                containers,
-                recreate,
-                lambda c: c.name,
-                "Recreating",
-            )
-            for error in errors.values():
-                raise OperationFailedError(error)
-
-            if scale is not None and len(containers) < scale:
-                containers.extend(self._execute_convergence_create(
-                    scale - len(containers), detached, start
-                ))
-            return containers
-
-    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
-            if scale is not None and len(containers) > scale:
-                self._downscale(containers[scale:], timeout)
-                containers = containers[:scale]
-            if start:
-                _, errors = parallel_execute(
-                    containers,
-                    lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
-                    lambda c: c.name,
-                    "Starting",
-                )
-                for error in errors.values():
-                    raise OperationFailedError(error)
-
-            if scale is not None and len(containers) < scale:
-                containers.extend(self._execute_convergence_create(
-                    scale - len(containers), detached, start
-                ))
-            return containers
+        if scale is not None and len(containers) > scale:
+            self._downscale(containers[scale:], timeout)
+            containers = containers[:scale]
+
+        def recreate(container):
+            return self.recreate_container(
+                container, timeout=timeout, attach_logs=not detached,
+                start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
+            )
+        containers, errors = parallel_execute(
+            containers,
+            recreate,
+            lambda c: c.name,
+            "Recreating",
+        )
+        for error in errors.values():
+            raise OperationFailedError(error)
+
+        if scale is not None and len(containers) < scale:
+            containers.extend(self._execute_convergence_create(
+                scale - len(containers), detached, start
+            ))
+        return containers
+
+    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
+        if scale is not None and len(containers) > scale:
+            self._downscale(containers[scale:], timeout)
+            containers = containers[:scale]
+        if start:
+            _, errors = parallel_execute(
+                containers,
+                lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
+                lambda c: c.name,
+                "Starting",
+            )
+            for error in errors.values():
+                raise OperationFailedError(error)
+
+        if scale is not None and len(containers) < scale:
+            containers.extend(self._execute_convergence_create(
+                scale - len(containers), detached, start
+            ))
+        return containers
 
     def _downscale(self, containers, timeout=None):
         def stop_and_remove(container):
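
The hunk above reads as an indentation cleanup: the statements in both method bodies are unchanged, only the indent width moves back to the standard four spaces. If that reading is right, the rc1 code still ran because Python only requires a suite to be indented *consistently*, not by any particular width — a two-line demonstration:

```python
def standard():
    return 'ok'          # conventional four-space body

def overindented():
            return 'ok'  # deeper but uniform: still valid Python

print(standard(), overindented())  # ok ok
```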

contrib/completion/bash/docker-compose

@@ -114,7 +114,7 @@ _docker_compose_build() {
     case "$cur" in
         -*)
-            COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
+            COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull --parallel" -- "$cur" ) )
             ;;
         *)
             __docker_compose_complete_services --filter source=build
@@ -361,7 +361,7 @@ _docker_compose_ps() {
     case "$cur" in
         -*)
-            COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
+            COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) )
             ;;
         *)
             __docker_compose_complete_services

contrib/completion/zsh/_docker-compose

@@ -117,6 +117,7 @@ __docker-compose_subcommand() {
                 '--no-cache[Do not use cache when building the image.]' \
                 '--pull[Always attempt to pull a newer version of the image.]' \
                 '--compress[Compress the build context using gzip.]' \
+                '--parallel[Build images in parallel.]' \
                 '*:services:__docker-compose_services_from_build' && ret=0
             ;;
         (bundle)
@@ -339,7 +340,7 @@ _docker-compose() {
     '(- :)'{-h,--help}'[Get help]' \
     '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
     '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-    "--compatibility[If set, Compose will attempt to convert deploy keys in v3 files to their non-Swarm equivalent]" \
+    "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
     '(- :)'{-v,--version}'[Print version and exit]' \
     '--verbose[Show more output]' \
     '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \

docker-compose.spec

@@ -98,4 +98,5 @@ exe = EXE(pyz,
           debug=False,
           strip=None,
           upx=True,
-          console=True)
+          console=True,
+          bootloader_ignore_signals=True)

requirements-dev.txt

@@ -1,5 +1,5 @@
 coverage==4.4.2
 flake8==3.5.0
-mock>=1.0.1
+mock==2.0.0
 pytest==3.6.3
 pytest-cov==2.5.1

requirements.txt

@@ -3,7 +3,7 @@ cached-property==1.3.0
 certifi==2017.4.17
 chardet==3.0.4
 colorama==0.4.0; sys_platform == 'win32'
-docker==3.7.0
+docker==3.7.1
 docker-pycreds==0.4.0
 dockerpty==0.4.1
 docopt==0.6.2

script/build/linux

@@ -5,7 +5,7 @@ set -ex
 ./script/clean
 
 TAG="docker-compose"
-docker build -t "$TAG" . | tail -n 200
+docker build -t "$TAG" .
 docker run \
     --rm --entrypoint="script/build/linux-entrypoint" \
     -v $(pwd)/dist:/code/dist \

project/RELEASE-PROCESS.md

@@ -40,7 +40,7 @@ This API token should be exposed to the release script through the
 ### A Bintray account and Bintray API key
 
 Your Bintray account will need to be an admin member of the
-[docker-compose organization](https://github.com/settings/tokens).
+[docker-compose organization](https://bintray.com/docker-compose).
 
 Additionally, you should generate a personal API key. To do so, click your
 username in the top-right hand corner and select "Edit profile" ; on the new
 page, select "API key" in the left-side menu.
@@ -129,7 +129,7 @@ assets public), proceed to the "Finalize a release" section of this guide.
 Once you're ready to make your release public, you may execute the following
 command from the root of the Compose repository:
 
 ```
-./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEAE_VERSION
+./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
 ```
 
 Note that this command will create and publish versioned assets to the public.

script/release/release.py

@@ -7,7 +7,6 @@ import os
 import shutil
 import sys
 import time
-from distutils.core import run_setup
 
 from jinja2 import Template
 from release.bintray import BintrayAPI
@@ -276,7 +275,8 @@ def finalize(args):
 
     repository.checkout_branch(br_name)
 
-    run_setup(os.path.join(REPO_ROOT, 'setup.py'), script_args=['sdist', 'bdist_wheel'])
+    os.system('python {setup_script} sdist bdist_wheel'.format(
+        setup_script=os.path.join(REPO_ROOT, 'setup.py')))
 
     merge_status = pr_data.merge()
     if not merge_status.merged and not args.finalize_resume:
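
The fix above shells out through `os.system`, whose return code is easy to ignore. For context, a commonly preferred equivalent uses `subprocess` so a failed build aborts the release — this is just an alternative sketch, not what the commit does:

```python
import os
import subprocess
import sys

REPO_ROOT = '.'  # assumption: repository root, as in release.py

# check_call raises CalledProcessError on a non-zero exit instead of
# returning a status code that os.system would let the caller discard.
subprocess.check_call(
    [sys.executable, os.path.join(REPO_ROOT, 'setup.py'), 'sdist', 'bdist_wheel'])
```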

script/run/run.sh

@@ -15,7 +15,7 @@
 
 set -e
 
-VERSION="1.24.0-rc1"
+VERSION="1.24.0-rc3"
 IMAGE="docker/compose:$VERSION"

script/setup/osx

@@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
     SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
 fi
 
-OPENSSL_VERSION=1.1.0h
+OPENSSL_VERSION=1.1.0j
 OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
-OPENSSL_SHA1=0fc39f6aa91b6e7f4d05018f7c5e991e1d2491fd
+OPENSSL_SHA1=dcad1efbacd9a4ed67d4514470af12bbe2a1d60a
 
-PYTHON_VERSION=3.6.6
+PYTHON_VERSION=3.6.8
 PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
-PYTHON_SHA1=ae1fc9ddd29ad8c1d5f7b0d799ff0787efeb9652
+PYTHON_SHA1=09fcc4edaef0915b4dedbfb462f1cd15f82d3a6f
 
 #
 # Install prerequisites.

tests/unit/cli/log_printer_test.py

@@ -193,7 +193,7 @@ class TestConsumeQueue(object):
         queue.put(item)
 
         generator = consume_queue(queue, True)
-        assert next(generator) is 'foobar-1'
+        assert next(generator) == 'foobar-1'
 
     def test_item_is_none_when_timeout_is_hit(self):
         queue = Queue()
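
The one-character fix above matters: `is` compares object identity, and CPython only happens to intern some strings, so the old assertion passed by accident (recent Pythons even emit a SyntaxWarning for literal `is` comparisons). A minimal illustration:

```python
a = 'foobar-1'
b = 'foobar-' + str(1)  # equal value, typically a distinct object at runtime

print(a == b)  # True: value equality, which is what the test means
print(a is b)  # implementation-dependent, often False
```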

tests/unit/config/config_test.py

@@ -3593,6 +3593,9 @@ class InterpolationTest(unittest.TestCase):
                         'reservations': {'memory': '100M'},
                     },
                 },
+                'credential_spec': {
+                    'file': 'spec.json'
+                },
             },
         },
     })
@@ -3610,7 +3613,8 @@ class InterpolationTest(unittest.TestCase):
         'mem_limit': '300M',
         'mem_reservation': '100M',
         'cpus': 0.7,
-        'name': 'foo'
+        'name': 'foo',
+        'security_opt': ['credentialspec=file://spec.json'],
     }
 
 @mock.patch.dict(os.environ)
@@ -3928,6 +3932,24 @@ class MergeNetworksTest(unittest.TestCase, MergeListsTest):
             }
         }
 
+    def test_network_has_none_value(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: {
+                'default': None
+            }},
+            {self.config_name: {
+                'default': {
+                    'aliases': []
+                }
+            }},
+            DEFAULT_VERSION)
+
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': []
+            }
+        }
+
     def test_all_properties(self):
         service_dict = config.merge_service_dicts(
             {self.config_name: {