mirror of https://github.com/docker/compose.git
commit 3a3288c54b
.circleci/config.yml
@@ -10,7 +10,7 @@ jobs:
           command: ./script/setup/osx
       - run:
           name: install tox
-          command: sudo pip install --upgrade tox==2.1.1
+          command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
       - run:
           name: unit tests
           command: tox -e py27,py36,py37 -- tests/unit
@@ -22,7 +22,7 @@ jobs:
       - checkout
       - run:
           name: upgrade python tools
-          command: sudo pip install --upgrade pip virtualenv
+          command: sudo pip install --upgrade pip virtualenv==16.2.0
       - run:
           name: setup script
           command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
CHANGELOG.md
@@ -1,7 +1,7 @@
 Change log
 ==========
 
-1.24.0 (2019-01-25)
+1.24.0 (2019-03-22)
 -------------------
 
 ### Features
@@ -11,6 +11,12 @@ Change log
 - Added a `--all` flag to `docker-compose ps` to include stopped one-off containers
   in the command's output.
 
+- Add bash completion for `ps --all|-a`
+
+- Support for credential_spec
+
+- Add `--parallel` to `docker build`'s options in `bash` and `zsh` completion
+
 ### Bugfixes
 
 - Fixed a bug where some valid credential helpers weren't properly handled by Compose
@@ -37,6 +43,16 @@ Change log
 - Missing images will no longer stop the execution of `docker-compose down` commands
   (a warning will be displayed instead).
 
+- Force `virtualenv` version for macOS CI
+
+- Fix merging of compose files when network has `None` config
+
+- Fix `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`
+
+- Bump `docker-py` version to `3.7.1` to fix SSH issues
+
+- Fix release script and some typos on release documentation
+
 1.23.2 (2018-11-28)
 -------------------
 
Dockerfile
@@ -17,6 +17,8 @@ ENV LANG en_US.UTF-8
 RUN useradd -d /home/user -m -s /bin/bash user
 WORKDIR /code/
 
+# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
+RUN pip install virtualenv==16.2.0
 RUN pip install tox==2.1.1
 
 ADD requirements.txt /code/
@@ -25,6 +27,7 @@ ADD .pre-commit-config.yaml /code/
 ADD setup.py /code/
 ADD tox.ini /code/
 ADD compose /code/compose/
+ADD README.md /code/
 RUN tox --notest
 
 ADD . /code/
compose/__init__.py
@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.24.0-rc1'
+__version__ = '1.24.0-rc3'
compose/cli/main.py
@@ -206,8 +206,8 @@ class TopLevelCommand(object):
                                   name specified in the client certificate
       --project-directory PATH    Specify an alternate working directory
                                   (default: the path of the Compose file)
-      --compatibility             If set, Compose will attempt to convert deploy
-                                  keys in v3 files to their non-Swarm equivalent
+      --compatibility             If set, Compose will attempt to convert keys
+                                  in v3 files to their non-Swarm equivalent
 
     Commands:
       build              Build or rebuild services
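The reworded help text tracks a real behavior change: with the credential_spec handling added in compose/config/config.py below, `--compatibility` now converts more than `deploy` keys. As a rough sketch of the deploy-key side of that translation (an illustrative helper, not Compose's actual implementation; the expected values come from the interpolation test further down):

```python
# Sketch of what --compatibility does with deploy resource keys.
# Hypothetical helper for illustration only.
def translate_deploy_keys(service):
    deploy = service.pop('deploy', {})
    resources = deploy.get('resources', {})
    limits = resources.get('limits', {})
    reservations = resources.get('reservations', {})

    if 'memory' in limits:
        service['mem_limit'] = limits['memory']
    if 'cpus' in limits:
        service['cpus'] = float(limits['cpus'])
    if 'memory' in reservations:
        service['mem_reservation'] = reservations['memory']
    return service

svc = {
    'image': 'busybox',
    'deploy': {'resources': {'limits': {'memory': '300M', 'cpus': '0.7'},
                             'reservations': {'memory': '100M'}}},
}
print(translate_deploy_keys(svc))
# {'image': 'busybox', 'mem_limit': '300M', 'cpus': 0.7, 'mem_reservation': '100M'}
```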
compose/config/config.py
@@ -51,6 +51,7 @@ from .validation import match_named_volumes
 from .validation import validate_against_config_schema
 from .validation import validate_config_section
 from .validation import validate_cpu
+from .validation import validate_credential_spec
 from .validation import validate_depends_on
 from .validation import validate_extends_file_path
 from .validation import validate_healthcheck
@@ -369,7 +370,6 @@ def check_swarm_only_config(service_dicts, compatibility=False):
     )
     if not compatibility:
         check_swarm_only_key(service_dicts, 'deploy')
-    check_swarm_only_key(service_dicts, 'credential_spec')
     check_swarm_only_key(service_dicts, 'configs')
 
 
@@ -706,6 +706,7 @@ def validate_service(service_config, service_names, config_file):
     validate_depends_on(service_config, service_names)
     validate_links(service_config, service_names)
     validate_healthcheck(service_config)
+    validate_credential_spec(service_config)
 
     if not service_dict.get('image') and has_uppercase(service_name):
         raise ConfigurationError(
@@ -894,6 +895,7 @@ def finalize_service(service_config, service_names, version, environment, compat
     normalize_build(service_dict, service_config.working_dir, environment)
 
     if compatibility:
+        service_dict = translate_credential_spec_to_security_opt(service_dict)
         service_dict, ignored_keys = translate_deploy_keys_to_container_config(
             service_dict
         )
@@ -930,6 +932,25 @@ def convert_restart_policy(name):
         raise ConfigurationError('Invalid restart policy "{}"'.format(name))
 
 
+def convert_credential_spec_to_security_opt(credential_spec):
+    if 'file' in credential_spec:
+        return 'file://{file}'.format(file=credential_spec['file'])
+    return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+    result = []
+
+    if 'credential_spec' in service_dict:
+        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+        result.append('credentialspec={spec}'.format(spec=spec))
+
+    if result:
+        service_dict['security_opt'] = result
+
+    return service_dict
+
+
 def translate_deploy_keys_to_container_config(service_dict):
     if 'credential_spec' in service_dict:
         del service_dict['credential_spec']
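The two new helpers are self-contained, so the mapping they implement can be checked standalone (the functions below are copied from the hunk above; the expected `security_opt` value matches the interpolation test later in this diff):

```python
def convert_credential_spec_to_security_opt(credential_spec):
    # 'file' wins when present; otherwise 'registry' is assumed to exist
    # (the validator added in validation.py guarantees one of the two).
    if 'file' in credential_spec:
        return 'file://{file}'.format(file=credential_spec['file'])
    return 'registry://{registry}'.format(registry=credential_spec['registry'])


def translate_credential_spec_to_security_opt(service_dict):
    result = []
    if 'credential_spec' in service_dict:
        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
        result.append('credentialspec={spec}'.format(spec=spec))
    if result:
        service_dict['security_opt'] = result
    return service_dict


# A service with credential_spec.file gains a credentialspec= security_opt:
print(translate_credential_spec_to_security_opt(
    {'credential_spec': {'file': 'spec.json'}}))
# {'credential_spec': {'file': 'spec.json'},
#  'security_opt': ['credentialspec=file://spec.json']}
```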
@@ -1172,7 +1193,7 @@ def merge_networks(base, override):
     base = {k: {} for k in base} if isinstance(base, list) else base
     override = {k: {} for k in override} if isinstance(override, list) else override
     for network_name in all_network_names:
-        md = MergeDict(base.get(network_name, {}), override.get(network_name, {}))
+        md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
         md.merge_field('aliases', merge_unique_items_lists, [])
         md.merge_field('link_local_ips', merge_unique_items_lists, [])
         md.merge_scalar('priority')
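The `or {}` change matters because a YAML network entry with no body (a bare `default:`) parses to `None`. The key then exists in the dict, so `dict.get(key, {})` still returns `None` and `MergeDict` receives it. A minimal illustration of the difference:

```python
base = {'default': None}          # what a bare "default:" entry parses to

# Old behaviour: the key exists, so the fallback {} is never used.
print(base.get('default', {}))    # None

# New behaviour: None is falsy, so the expression falls through to {}.
print(base.get('default') or {})  # {}
```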
compose/config/validation.py
@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
             )
 
 
+def validate_credential_spec(service_config):
+    credential_spec = service_config.config.get('credential_spec')
+    if not credential_spec:
+        return
+
+    if 'registry' not in credential_spec and 'file' not in credential_spec:
+        raise ConfigurationError(
+            "Service '{s.name}' is missing 'credential_spec.file' or "
+            "credential_spec.registry'".format(s=service_config)
+        )
+
+
 def get_unsupported_config_msg(path, error_key):
     msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
     if error_key in DOCKER_CONFIG_HINTS:
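The new validator only enforces that one of the two sub-keys is present; an absent `credential_spec` is fine. Sketched standalone (a hypothetical predicate for illustration; Compose's version raises `ConfigurationError` with the service name instead of returning a bool):

```python
def is_valid_credential_spec(credential_spec):
    # Valid when absent, or when it names either a file or a registry value.
    if not credential_spec:
        return True
    return 'registry' in credential_spec or 'file' in credential_spec

print(is_valid_credential_spec(None))                     # True: key absent
print(is_valid_credential_spec({'file': 'spec.json'}))    # True
print(is_valid_credential_spec({'registry': 'my-cred'}))  # True
print(is_valid_credential_spec({'something': 'else'}))    # False: Compose raises
```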
compose/service.py
@@ -291,7 +291,7 @@ class Service(object):
             c for c in stopped_containers if self._containers_have_diverged([c])
         ]
         for c in divergent_containers:
-            c.remove()
+            c.remove()
 
         all_containers = list(set(all_containers) - set(divergent_containers))
 
@@ -461,50 +461,50 @@ class Service(object):
 
     def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                       renew_anonymous_volumes):
-        if scale is not None and len(containers) > scale:
-            self._downscale(containers[scale:], timeout)
-            containers = containers[:scale]
+        if scale is not None and len(containers) > scale:
+            self._downscale(containers[scale:], timeout)
+            containers = containers[:scale]
 
-        def recreate(container):
-            return self.recreate_container(
-                container, timeout=timeout, attach_logs=not detached,
-                start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
-            )
-        containers, errors = parallel_execute(
-            containers,
-            recreate,
-            lambda c: c.name,
-            "Recreating",
+        def recreate(container):
+            return self.recreate_container(
+                container, timeout=timeout, attach_logs=not detached,
+                start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
+            )
+        containers, errors = parallel_execute(
+            containers,
+            recreate,
+            lambda c: c.name,
+            "Recreating",
         )
         for error in errors.values():
             raise OperationFailedError(error)
-
-        if scale is not None and len(containers) < scale:
-            containers.extend(self._execute_convergence_create(
-                scale - len(containers), detached, start
-            ))
-        return containers
-
-    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
-        if scale is not None and len(containers) > scale:
-            self._downscale(containers[scale:], timeout)
-            containers = containers[:scale]
-        if start:
-            _, errors = parallel_execute(
-                containers,
-                lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
-                lambda c: c.name,
-                "Starting",
-            )
-
-            for error in errors.values():
-                raise OperationFailedError(error)
-
-        if scale is not None and len(containers) < scale:
-            containers.extend(self._execute_convergence_create(
-                scale - len(containers), detached, start
-            ))
-        return containers
+
+        if scale is not None and len(containers) < scale:
+            containers.extend(self._execute_convergence_create(
+                scale - len(containers), detached, start
+            ))
+        return containers
+
+    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
+        if scale is not None and len(containers) > scale:
+            self._downscale(containers[scale:], timeout)
+            containers = containers[:scale]
+        if start:
+            _, errors = parallel_execute(
+                containers,
+                lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
+                lambda c: c.name,
+                "Starting",
+            )
+
+            for error in errors.values():
+                raise OperationFailedError(error)
+
+        if scale is not None and len(containers) < scale:
+            containers.extend(self._execute_convergence_create(
+                scale - len(containers), detached, start
+            ))
+        return containers
 
     def _downscale(self, containers, timeout=None):
        def stop_and_remove(container):
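This mirror strips leading whitespace, so the `-` and `+` sides of the hunk above read identically; what is recoverable is the scale bookkeeping both convergence paths share: downscale when above `scale`, act on the survivors in parallel, then create up to `scale`. A rough standalone paraphrase of that control flow (not Compose's code):

```python
def converge(containers, scale, action):
    """Paraphrase of the convergence bookkeeping in the hunk above."""
    if scale is not None and len(containers) > scale:
        # Too many containers: keep the first `scale` (Compose also
        # stops and removes the surplus via _downscale).
        containers = containers[:scale]
    # Recreate or start the survivors (Compose does this in parallel
    # and raises OperationFailedError on any per-container error).
    containers = [action(c) for c in containers]
    if scale is not None and len(containers) < scale:
        # Too few containers: create the difference.
        containers += ['new'] * (scale - len(containers))
    return containers

print(converge(['a', 'b', 'c'], 2, str.upper))  # ['A', 'B']
print(converge(['a'], 3, str.upper))            # ['A', 'new', 'new']
```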
contrib/completion/bash/docker-compose
@@ -114,7 +114,7 @@ _docker_compose_build() {
 
     case "$cur" in
         -*)
-            COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
+            COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull --parallel" -- "$cur" ) )
             ;;
         *)
             __docker_compose_complete_services --filter source=build
@@ -361,7 +361,7 @@ _docker_compose_ps() {
 
     case "$cur" in
         -*)
-            COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
+            COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) )
             ;;
         *)
             __docker_compose_complete_services
contrib/completion/zsh/_docker-compose
@@ -117,6 +117,7 @@ __docker-compose_subcommand() {
                 '--no-cache[Do not use cache when building the image.]' \
                 '--pull[Always attempt to pull a newer version of the image.]' \
                 '--compress[Compress the build context using gzip.]' \
+                '--parallel[Build images in parallel.]' \
                 '*:services:__docker-compose_services_from_build' && ret=0
             ;;
         (bundle)
@@ -339,7 +340,7 @@ _docker-compose() {
         '(- :)'{-h,--help}'[Get help]' \
         '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
         '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-        "--compatibility[If set, Compose will attempt to convert deploy keys in v3 files to their non-Swarm equivalent]" \
+        "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
         '(- :)'{-v,--version}'[Print version and exit]' \
         '--verbose[Show more output]' \
         '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \
docker-compose.spec
@@ -98,4 +98,5 @@ exe = EXE(pyz,
           debug=False,
           strip=None,
           upx=True,
-          console=True)
+          console=True,
+          bootloader_ignore_signals=True)
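`bootloader_ignore_signals=True` tells the PyInstaller bootloader to ignore signals such as SIGINT rather than reacting to them itself, leaving signal handling to the frozen application, which is how the changelog's "Fix `CTRL+C` issues" entry is implemented. A minimal sketch of an application that relies on receiving CTRL+C itself (illustrative only, not Compose's actual handler):

```python
import signal
import sys
import time

def handle_sigint(signum, frame):
    # With bootloader_ignore_signals=True, only the application sees
    # CTRL+C, so cleanup (for Compose: stopping containers) can run here
    # instead of the bootloader tearing the process down first.
    print('shutting down cleanly')
    sys.exit(0)

signal.signal(signal.SIGINT, handle_sigint)
while True:
    time.sleep(1)  # simulate long-running work until CTRL+C arrives
```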
requirements-dev.txt
@@ -1,5 +1,5 @@
 coverage==4.4.2
 flake8==3.5.0
-mock>=1.0.1
+mock==2.0.0
 pytest==3.6.3
 pytest-cov==2.5.1
requirements.txt
@@ -3,7 +3,7 @@ cached-property==1.3.0
 certifi==2017.4.17
 chardet==3.0.4
 colorama==0.4.0; sys_platform == 'win32'
-docker==3.7.0
+docker==3.7.1
 docker-pycreds==0.4.0
 dockerpty==0.4.1
 docopt==0.6.2
script/build/linux
@@ -5,7 +5,7 @@ set -ex
 ./script/clean
 
 TAG="docker-compose"
-docker build -t "$TAG" . | tail -n 200
+docker build -t "$TAG" .
 docker run \
     --rm --entrypoint="script/build/linux-entrypoint" \
     -v $(pwd)/dist:/code/dist \
project/RELEASE-PROCESS.md
@@ -40,7 +40,7 @@ This API token should be exposed to the release script through the
 ### A Bintray account and Bintray API key
 
 Your Bintray account will need to be an admin member of the
-[docker-compose organization](https://github.com/settings/tokens).
+[docker-compose organization](https://bintray.com/docker-compose).
 Additionally, you should generate a personal API key. To do so, click your
 username in the top-right hand corner and select "Edit profile" ; on the new
 page, select "API key" in the left-side menu.
@@ -129,7 +129,7 @@ assets public), proceed to the "Finalize a release" section of this guide.
 Once you're ready to make your release public, you may execute the following
 command from the root of the Compose repository:
 ```
-./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEAE_VERSION
+./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
 ```
 
 Note that this command will create and publish versioned assets to the public.
script/release/release.py
@@ -7,7 +7,6 @@ import os
 import shutil
 import sys
 import time
-from distutils.core import run_setup
 
 from jinja2 import Template
 from release.bintray import BintrayAPI
@@ -276,7 +275,8 @@ def finalize(args):
 
     repository.checkout_branch(br_name)
 
-    run_setup(os.path.join(REPO_ROOT, 'setup.py'), script_args=['sdist', 'bdist_wheel'])
+    os.system('python {setup_script} sdist bdist_wheel'.format(
+        setup_script=os.path.join(REPO_ROOT, 'setup.py')))
 
     merge_status = pr_data.merge()
     if not merge_status.merged and not args.finalize_resume:
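The change swaps `distutils.core.run_setup`, which executed setup.py in-process, for a shell invocation in a fresh interpreter. One caveat of `os.system` is that it silently discards the exit status; if one were hardening this further, `subprocess` gives the same effect with an error check (a sketch under that assumption, not what the commit does):

```python
import os
import subprocess
import sys

REPO_ROOT = '.'  # assumption: run from the repository root, as in the script

# Equivalent to the os.system() call above, but raises CalledProcessError
# on a non-zero exit status instead of ignoring it.
subprocess.check_call([
    sys.executable,                        # same interpreter as the script
    os.path.join(REPO_ROOT, 'setup.py'),
    'sdist',
    'bdist_wheel',
])
```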
script/run/run.sh
@@ -15,7 +15,7 @@
 
 set -e
 
-VERSION="1.24.0-rc1"
+VERSION="1.24.0-rc3"
 IMAGE="docker/compose:$VERSION"
 
 
script/setup/osx
@@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
     SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
 fi
 
-OPENSSL_VERSION=1.1.0h
+OPENSSL_VERSION=1.1.0j
 OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
-OPENSSL_SHA1=0fc39f6aa91b6e7f4d05018f7c5e991e1d2491fd
+OPENSSL_SHA1=dcad1efbacd9a4ed67d4514470af12bbe2a1d60a
 
-PYTHON_VERSION=3.6.6
+PYTHON_VERSION=3.6.8
 PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
-PYTHON_SHA1=ae1fc9ddd29ad8c1d5f7b0d799ff0787efeb9652
+PYTHON_SHA1=09fcc4edaef0915b4dedbfb462f1cd15f82d3a6f
 
 #
 # Install prerequisites.
tests/unit/cli/log_printer_test.py
@@ -193,7 +193,7 @@ class TestConsumeQueue(object):
         queue.put(item)
 
         generator = consume_queue(queue, True)
-        assert next(generator) is 'foobar-1'
+        assert next(generator) == 'foobar-1'
 
     def test_item_is_none_when_timeout_is_hit(self):
         queue = Queue()
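The test fix swaps `is` for `==`: `is` compares object identity, and while CPython often interns short string literals, that is an implementation detail, so `next(generator) is 'foobar-1'` can fail even when the yielded value is correct. A quick demonstration:

```python
a = 'foobar-1'
b = ''.join(['foobar', '-1'])  # same value, but built at runtime

print(a == b)  # True: the values are equal
print(a is b)  # typically False on CPython: distinct objects
```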
tests/unit/config/config_test.py
@@ -3593,6 +3593,9 @@ class InterpolationTest(unittest.TestCase):
                         'reservations': {'memory': '100M'},
                     },
                 },
+                'credential_spec': {
+                    'file': 'spec.json'
+                },
             },
         },
     })
@@ -3610,7 +3613,8 @@ class InterpolationTest(unittest.TestCase):
             'mem_limit': '300M',
             'mem_reservation': '100M',
             'cpus': 0.7,
-            'name': 'foo'
+            'name': 'foo',
+            'security_opt': ['credentialspec=file://spec.json'],
         }
 
     @mock.patch.dict(os.environ)
@@ -3928,6 +3932,24 @@ class MergeNetworksTest(unittest.TestCase, MergeListsTest):
             }
         }
 
+    def test_network_has_none_value(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: {
+                'default': None
+            }},
+            {self.config_name: {
+                'default': {
+                    'aliases': []
+                }
+            }},
+            DEFAULT_VERSION)
+
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': []
+            }
+        }
+
     def test_all_properties(self):
         service_dict = config.merge_service_dicts(
             {self.config_name: {