Merge pull request #6726 from docker/bump-1.25.0-rc1

Bump 1.25.0-rc1
Ulysses Souza, 2019-05-24 00:33:04 +02:00, committed by GitHub
commit 2bc4161526
66 changed files with 1141 additions and 369 deletions

.circleci/config.yml

@@ -13,7 +13,7 @@ jobs:
           command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
       - run:
           name: unit tests
-          command: tox -e py27,py36,py37 -- tests/unit
+          command: tox -e py27,py37 -- tests/unit

   build-osx-binary:
     macos:

CHANGELOG.md

@ -1,7 +1,76 @@
Change log Change log
========== ==========
1.24.0 (2019-03-22) 1.25.0 (2019-05-22)
-------------------
### Features
- Add tag `docker-compose:latest`
- Add `docker-compose:<version>-alpine` image/tag
- Add `docker-compose:<version>-debian` image/tag
- Bumped `docker-py` 4.0.1
- Supports `requests` up to 2.22.0 version
- Drops empty tag on `build:cache_from`
- `Dockerfile` now generates `libmusl` binaries for alpine
- Only pull images that can't be built
- Attribute `scale` can now accept `0` as a value
- Added `--quiet` build flag
- Added `--no-interpolate` to `docker-compose config`
- Bump OpenSSL for macOS build (`1.1.0j` to `1.1.1a`)
- Added `--no-rm` to `build` command
- Added support for `credential_spec`
- Resolve digests without pulling image
- Upgrade `pyyaml` to `4.2b1`
- Lowered severity to `warning` if `down` tries to remove nonexisting image
- Use improved API fields for project events when possible
- Update `setup.py` for modern `pypi/setuptools` and remove `pandoc` dependencies
- Removed `Dockerfile.armhf` which is no longer needed
### Bugfixes
- Fixed `--remove-orphans` when used with `up --no-start`
- Fixed `docker-compose ps --all`
- Fixed `depends_on` dependency recreation behavior
- Fixed bash completion for `build --memory`
- Fixed misleading warning concerning env vars when performing an `exec` command
- Fixed failure check in parallel_execute_watch
- Fixed race condition after pulling image
- Fixed error on duplicate mount points.
- Fixed merge on networks section
- Always connect Compose container to `stdin`
- Fixed the presentation of failed services on 'docker-compose start' when containers are not available
1.24.0 (2019-03-28)
------------------- -------------------
### Features ### Features

Dockerfile

@ -1,36 +1,71 @@
FROM docker:18.06.1 as docker ARG DOCKER_VERSION=18.09.5
FROM python:3.6 ARG PYTHON_VERSION=3.7.3
ARG BUILD_ALPINE_VERSION=3.9
ARG BUILD_DEBIAN_VERSION=slim-stretch
ARG RUNTIME_ALPINE_VERSION=3.9.3
ARG RUNTIME_DEBIAN_VERSION=stretch-20190326-slim
RUN set -ex; \ ARG BUILD_PLATFORM=alpine
apt-get update -qq; \
apt-get install -y \
locales \
python-dev \
git
COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker FROM docker:${DOCKER_VERSION} AS docker-cli
# Python3 requires a valid locale FROM python:${PYTHON_VERSION}-alpine${BUILD_ALPINE_VERSION} AS build-alpine
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen RUN apk add --no-cache \
ENV LANG en_US.UTF-8 bash \
build-base \
ca-certificates \
curl \
gcc \
git \
libc-dev \
libffi-dev \
libgcc \
make \
musl-dev \
openssl \
openssl-dev \
python2 \
python2-dev \
zlib-dev
ENV BUILD_BOOTLOADER=1
RUN useradd -d /home/user -m -s /bin/bash user FROM python:${PYTHON_VERSION}-${BUILD_DEBIAN_VERSION} AS build-debian
RUN apt-get update && apt-get install -y \
curl \
gcc \
git \
libc-dev \
libgcc-6-dev \
make \
openssl \
python2.7-dev
FROM build-${BUILD_PLATFORM} AS build
COPY docker-compose-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
WORKDIR /code/ WORKDIR /code/
# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed # FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
RUN pip install virtualenv==16.2.0 RUN pip install virtualenv==16.2.0
RUN pip install tox==2.1.1 RUN pip install tox==2.9.1
ADD requirements.txt /code/ COPY requirements.txt .
ADD requirements-dev.txt /code/ COPY requirements-dev.txt .
ADD .pre-commit-config.yaml /code/ COPY .pre-commit-config.yaml .
ADD setup.py /code/ COPY tox.ini .
ADD tox.ini /code/ COPY setup.py .
ADD compose /code/compose/ COPY README.md .
ADD README.md /code/ COPY compose compose/
RUN tox --notest RUN tox --notest
COPY . .
ARG GIT_COMMIT=unknown
ENV DOCKER_COMPOSE_GITSHA=$GIT_COMMIT
RUN script/build/linux-entrypoint
ADD . /code/ FROM alpine:${RUNTIME_ALPINE_VERSION} AS runtime-alpine
RUN chown -R user /code/ FROM debian:${RUNTIME_DEBIAN_VERSION} AS runtime-debian
FROM runtime-${BUILD_PLATFORM} AS runtime
ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"] COPY docker-compose-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
COPY --from=build /usr/local/bin/docker-compose /usr/local/bin/docker-compose

Dockerfile.armhf

@@ -1,39 +0,0 @@
-FROM python:3.6
-
-RUN set -ex; \
-    apt-get update -qq; \
-    apt-get install -y \
-        locales \
-        curl \
-        python-dev \
-        git
-
-RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
-    SHA256=f8de6378dad825b9fd5c3c2f949e791d22f918623c27a72c84fd6975a0e5d0a2; \
-    echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
-    tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
-    mv docker /usr/local/bin/docker && \
-    chmod +x /usr/local/bin/docker && \
-    rm dockerbins.tgz
-
-# Python3 requires a valid locale
-RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
-ENV LANG en_US.UTF-8
-
-RUN useradd -d /home/user -m -s /bin/bash user
-
-WORKDIR /code/
-
-RUN pip install tox==2.1.1
-
-ADD requirements.txt /code/
-ADD requirements-dev.txt /code/
-ADD .pre-commit-config.yaml /code/
-ADD setup.py /code/
-ADD tox.ini /code/
-ADD compose /code/compose/
-RUN tox --notest
-
-ADD . /code/
-RUN chown -R user /code/
-
-ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]

Dockerfile.run

@@ -1,19 +0,0 @@
-FROM docker:18.06.1 as docker
-FROM alpine:3.8
-
-ENV GLIBC 2.28-r0
-
-RUN apk update && apk add --no-cache openssl ca-certificates curl libgcc && \
-    curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
-    curl -fsSL -o glibc-$GLIBC.apk https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
-    apk add --no-cache glibc-$GLIBC.apk && \
-    ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
-    ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib && \
-    ln -s /usr/lib/libgcc_s.so.1 /usr/glibc-compat/lib && \
-    rm /etc/apk/keys/sgerrand.rsa.pub glibc-$GLIBC.apk && \
-    apk del curl
-
-COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker
-COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
-
-ENTRYPOINT ["docker-compose"]

Jenkinsfile

@ -1,29 +1,38 @@
#!groovy #!groovy
def image def buildImage = { String baseImage ->
def image
def buildImage = { ->
wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) { wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
stage("build image") { stage("build image for \"${baseImage}\"") {
checkout(scm) checkout(scm)
def imageName = "dockerbuildbot/compose:${gitCommit()}" def imageName = "dockerbuildbot/compose:${baseImage}-${gitCommit()}"
image = docker.image(imageName) image = docker.image(imageName)
try { try {
image.pull() image.pull()
} catch (Exception exc) { } catch (Exception exc) {
image = docker.build(imageName, ".") sh """GIT_COMMIT=\$(script/build/write-git-sha) && \\
image.push() docker build -t ${imageName} \\
--target build \\
--build-arg BUILD_PLATFORM="${baseImage}" \\
--build-arg GIT_COMMIT="${GIT_COMMIT}" \\
.\\
"""
sh "docker push ${imageName}"
echo "${imageName}"
return imageName
} }
} }
} }
echo "image.id: ${image.id}"
return image.id
} }
def get_versions = { int number -> def get_versions = { String imageId, int number ->
def docker_versions def docker_versions
wrappedNode(label: "ubuntu && !zfs") { wrappedNode(label: "ubuntu && !zfs") {
def result = sh(script: """docker run --rm \\ def result = sh(script: """docker run --rm \\
--entrypoint=/code/.tox/py27/bin/python \\ --entrypoint=/code/.tox/py27/bin/python \\
${image.id} \\ ${imageId} \\
/code/script/test/versions.py -n ${number} docker/docker-ce recent /code/script/test/versions.py -n ${number} docker/docker-ce recent
""", returnStdout: true """, returnStdout: true
) )
@ -35,9 +44,11 @@ def get_versions = { int number ->
def runTests = { Map settings -> def runTests = { Map settings ->
def dockerVersions = settings.get("dockerVersions", null) def dockerVersions = settings.get("dockerVersions", null)
def pythonVersions = settings.get("pythonVersions", null) def pythonVersions = settings.get("pythonVersions", null)
def baseImage = settings.get("baseImage", null)
def imageName = settings.get("image", null)
if (!pythonVersions) { if (!pythonVersions) {
throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py36')`") throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py37')`")
} }
if (!dockerVersions) { if (!dockerVersions) {
throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`") throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
@ -45,7 +56,7 @@ def runTests = { Map settings ->
{ -> { ->
wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) { wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
stage("test python=${pythonVersions} / docker=${dockerVersions}") { stage("test python=${pythonVersions} / docker=${dockerVersions} / baseImage=${baseImage}") {
checkout(scm) checkout(scm)
def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim() def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
echo "Using local system's storage driver: ${storageDriver}" echo "Using local system's storage driver: ${storageDriver}"
@ -55,13 +66,13 @@ def runTests = { Map settings ->
--privileged \\ --privileged \\
--volume="\$(pwd)/.git:/code/.git" \\ --volume="\$(pwd)/.git:/code/.git" \\
--volume="/var/run/docker.sock:/var/run/docker.sock" \\ --volume="/var/run/docker.sock:/var/run/docker.sock" \\
-e "TAG=${image.id}" \\ -e "TAG=${imageName}" \\
-e "STORAGE_DRIVER=${storageDriver}" \\ -e "STORAGE_DRIVER=${storageDriver}" \\
-e "DOCKER_VERSIONS=${dockerVersions}" \\ -e "DOCKER_VERSIONS=${dockerVersions}" \\
-e "BUILD_NUMBER=\$BUILD_TAG" \\ -e "BUILD_NUMBER=\$BUILD_TAG" \\
-e "PY_TEST_VERSIONS=${pythonVersions}" \\ -e "PY_TEST_VERSIONS=${pythonVersions}" \\
--entrypoint="script/test/ci" \\ --entrypoint="script/test/ci" \\
${image.id} \\ ${imageName} \\
--verbose --verbose
""" """
} }
@ -69,16 +80,16 @@ def runTests = { Map settings ->
} }
} }
buildImage()
def testMatrix = [failFast: true] def testMatrix = [failFast: true]
def docker_versions = get_versions(2) def baseImages = ['alpine', 'debian']
def pythonVersions = ['py27', 'py37']
for (int i = 0; i < docker_versions.length; i++) { baseImages.each { baseImage ->
def dockerVersion = docker_versions[i] def imageName = buildImage(baseImage)
testMatrix["${dockerVersion}_py27"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py27"]) get_versions(imageName, 2).each { dockerVersion ->
testMatrix["${dockerVersion}_py36"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py36"]) pythonVersions.each { pyVersion ->
testMatrix["${dockerVersion}_py37"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py37"]) testMatrix["${baseImage}_${dockerVersion}_${pyVersion}"] = runTests([baseImage: baseImage, image: imageName, dockerVersions: dockerVersion, pythonVersions: pyVersion])
}
}
} }
parallel(testMatrix) parallel(testMatrix)
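The loop above replaces the old indexed for-loop: the pipeline now builds one image per base platform, then fans out a full cross product of base image, Docker version, and Python version. A minimal Python sketch of that expansion (the Docker version strings are placeholders for whatever script/test/versions.py reports as the two most recent):

    base_images = ['alpine', 'debian']
    docker_versions = ['19.03.0', '18.09.5']  # hypothetical output of versions.py -n 2
    python_versions = ['py27', 'py37']

    test_matrix = {}
    for base in base_images:
        for docker in docker_versions:
            for py in python_versions:
                # mirrors the "${baseImage}_${dockerVersion}_${pyVersion}" key above
                test_matrix['{}_{}_{}'.format(base, docker, py)] = (base, docker, py)

    print(len(test_matrix))  # 2 x 2 x 2 = 8 parallel test stages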

MAINTAINERS

@@ -11,9 +11,8 @@
 [Org]
     [Org."Core maintainers"]
         people = [
-            "mefyl",
-            "mnottale",
-            "shin-",
+            "rumpl",
+            "ulyssessouza",
         ]
     [Org.Alumni]
         people = [

@@ -34,6 +33,10 @@
             # including muti-file support, variable interpolation, secrets
             # emulation and many more
             "dnephin",
+            "shin-",
+            "mefyl",
+            "mnottale",
         ]
 
 [people]

@@ -74,7 +77,17 @@
     Email = "mazz@houseofmnowster.com"
     GitHub = "mnowster"
 
-    [People.shin-]
+    [people.rumpl]
+    Name = "Djordje Lukic"
+    Email = "djordje.lukic@docker.com"
+    GitHub = "rumpl"
+
+    [people.shin-]
     Name = "Joffrey F"
-    Email = "joffrey@docker.com"
+    Email = "f.joffrey@gmail.com"
     GitHub = "shin-"
+
+    [people.ulyssessouza]
+    Name = "Ulysses Domiciano Souza"
+    Email = "ulysses.souza@docker.com"
+    GitHub = "ulyssessouza"

appveyor.yml

@@ -2,15 +2,15 @@
 version: '{branch}-{build}'
 
 install:
-  - "SET PATH=C:\\Python36-x64;C:\\Python36-x64\\Scripts;%PATH%"
+  - "SET PATH=C:\\Python37-x64;C:\\Python37-x64\\Scripts;%PATH%"
   - "python --version"
-  - "pip install tox==2.9.1 virtualenv==15.1.0"
+  - "pip install tox==2.9.1 virtualenv==16.2.0"
 
 # Build the binary after tests
 build: false
 
 test_script:
-  - "tox -e py27,py36,py37 -- tests/unit"
+  - "tox -e py27,py37 -- tests/unit"
   - ps: ".\\script\\build\\windows.ps1"
 
 artifacts:

compose/__init__.py

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.24.0'
+__version__ = '1.25.0-rc1'

compose/bundle.py

@@ -95,19 +95,10 @@ def get_image_digest(service, allow_push=False):
     if separator == '@':
         return service.options['image']
 
-    try:
-        image = service.image()
-    except NoSuchImageError:
-        action = 'build' if 'build' in service.options else 'pull'
-        raise UserError(
-            "Image not found for service '{service}'. "
-            "You might need to run `docker-compose {action} {service}`."
-            .format(service=service.name, action=action))
+    digest = get_digest(service)
 
-    if image['RepoDigests']:
-        # TODO: pick a digest based on the image tag if there are multiple
-        # digests
-        return image['RepoDigests'][0]
+    if digest:
+        return digest
 
     if 'build' not in service.options:
         raise NeedsPull(service.image_name, service.name)

@@ -118,6 +109,32 @@ def get_image_digest(service, allow_push=False):
 
     return push_image(service)
 
 
+def get_digest(service):
+    digest = None
+    try:
+        image = service.image()
+        # TODO: pick a digest based on the image tag if there are multiple
+        # digests
+        if image['RepoDigests']:
+            digest = image['RepoDigests'][0]
+    except NoSuchImageError:
+        try:
+            # Fetch the image digest from the registry
+            distribution = service.get_image_registry_data()
+            if distribution['Descriptor']['digest']:
+                digest = '{image_name}@{digest}'.format(
+                    image_name=service.image_name,
+                    digest=distribution['Descriptor']['digest']
+                )
+        except NoSuchImageError:
+            raise UserError(
+                "Digest not found for service '{service}'. "
+                "Repository does not exist or may require 'docker login'"
+                .format(service=service.name))
+    return digest
+
+
 def push_image(service):
     try:
         digest = service.push()

@@ -147,10 +164,10 @@ def push_image(service):
 
 def to_bundle(config, image_digests):
     if config.networks:
-        log.warn("Unsupported top level key 'networks' - ignoring")
+        log.warning("Unsupported top level key 'networks' - ignoring")
 
     if config.volumes:
-        log.warn("Unsupported top level key 'volumes' - ignoring")
+        log.warning("Unsupported top level key 'volumes' - ignoring")
 
     config = denormalize_config(config)

@@ -175,7 +192,7 @@ def convert_service_to_bundle(name, service_dict, image_digest):
             continue
 
         if key not in SUPPORTED_KEYS:
-            log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
+            log.warning("Unsupported key '{}' in services.{} - ignoring".format(key, name))
             continue
 
         if key == 'environment':

@@ -222,7 +239,7 @@ def make_service_networks(name, service_dict):
     for network_name, network_def in get_network_defs_for_service(service_dict).items():
         for key in network_def.keys():
-            log.warn(
+            log.warning(
                 "Unsupported key '{}' in services.{}.networks.{} - ignoring"
                 .format(key, name, network_name))
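The new get_digest() tries the local image's RepoDigests first and only then asks the registry, which is what lets `docker-compose bundle` pin digests without pulling. A standalone sketch of that registry fallback using docker-py 4.x (busybox:latest is just an example image; a reachable daemon, and `docker login` for private repositories, are assumed):

    import docker

    client = docker.APIClient()
    dist = client.inspect_distribution('busybox:latest')  # registry query, no pull
    digest = dist['Descriptor']['digest']                 # e.g. 'sha256:6d9ac9237a84...'
    print('busybox:latest@{}'.format(digest))             # same shape as get_digest()'s result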

compose/cli/command.py

@@ -21,10 +21,27 @@ from .utils import get_version_info
 
 log = logging.getLogger(__name__)
 
+SILENT_COMMANDS = set((
+    'events',
+    'exec',
+    'kill',
+    'logs',
+    'pause',
+    'ps',
+    'restart',
+    'rm',
+    'start',
+    'stop',
+    'top',
+    'unpause',
+))
+
 
-def project_from_options(project_dir, options):
+def project_from_options(project_dir, options, additional_options={}):
     override_dir = options.get('--project-directory')
-    environment = Environment.from_env_file(override_dir or project_dir)
+    environment_file = options.get('--env-file')
+    environment = Environment.from_env_file(override_dir or project_dir, environment_file)
+    environment.silent = options.get('COMMAND', None) in SILENT_COMMANDS
     set_parallel_limit(environment)
 
     host = options.get('--host')

@@ -40,6 +57,7 @@ def project_from_options(project_dir, options):
         environment=environment,
         override_dir=override_dir,
         compatibility=options.get('--compatibility'),
+        interpolate=(not additional_options.get('--no-interpolate'))
     )

@@ -59,15 +77,17 @@ def set_parallel_limit(environment):
         parallel.GlobalLimit.set_global_limit(parallel_limit)
 
 
-def get_config_from_options(base_dir, options):
+def get_config_from_options(base_dir, options, additional_options={}):
     override_dir = options.get('--project-directory')
-    environment = Environment.from_env_file(override_dir or base_dir)
+    environment_file = options.get('--env-file')
+    environment = Environment.from_env_file(override_dir or base_dir, environment_file)
     config_path = get_config_path_from_options(
         base_dir, options, environment
     )
     return config.load(
         config.find(base_dir, config_path, environment, override_dir),
-        options.get('--compatibility')
+        options.get('--compatibility'),
+        not additional_options.get('--no-interpolate')
     )

@@ -105,14 +125,14 @@ def get_client(environment, verbose=False, version=None, tls_config=None, host=N
 
 def get_project(project_dir, config_path=None, project_name=None, verbose=False,
                 host=None, tls_config=None, environment=None, override_dir=None,
-                compatibility=False):
+                compatibility=False, interpolate=True):
     if not environment:
         environment = Environment.from_env_file(project_dir)
     config_details = config.find(project_dir, config_path, environment, override_dir)
     project_name = get_project_name(
         config_details.working_dir, project_name, environment
     )
 
-    config_data = config.load(config_details, compatibility)
+    config_data = config.load(config_details, compatibility, interpolate)
 
     api_version = environment.get(
         'COMPOSE_API_VERSION',
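The silencing is keyed off the docopt 'COMMAND' entry, so read-only commands like `ps` or `logs` no longer emit "variable is not set" warnings. A small sketch of the gate (assumes this branch of compose is importable):

    from compose.cli.command import SILENT_COMMANDS

    options = {'COMMAND': 'ps'}
    silent = options.get('COMMAND', None) in SILENT_COMMANDS
    print(silent)  # True -> Environment.silent suppresses the missing-variable warning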

compose/cli/docker_client.py

@@ -31,7 +31,7 @@ def get_tls_version(environment):
 
     tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
     if not hasattr(ssl, tls_attr_name):
-        log.warn(
+        log.warning(
             'The "{}" protocol is unavailable. You may need to update your '
             'version of Python or OpenSSL. Falling back to TLSv1 (default).'
             .format(compose_tls_version)
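This is one instance of a repo-wide rename: Logger.warn is a deprecated alias of Logger.warning in the standard library, kept only for backward compatibility, and the behavior is otherwise identical:

    import logging

    log = logging.getLogger(__name__)
    log.warning('preferred spelling')  # same output as log.warn, without the DeprecationWarning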

compose/cli/main.py

@@ -208,6 +208,7 @@ class TopLevelCommand(object):
                                   (default: the path of the Compose file)
       --compatibility             If set, Compose will attempt to convert keys
                                   in v3 files to their non-Swarm equivalent
+      --env-file PATH             Specify an alternate environment file
 
     Commands:
       build              Build or rebuild services

@@ -260,10 +261,12 @@ class TopLevelCommand(object):
             --compress              Compress the build context using gzip.
             --force-rm              Always remove intermediate containers.
             --no-cache              Do not use cache when building the image.
+            --no-rm                 Do not remove intermediate containers after a successful build.
             --pull                  Always attempt to pull a newer version of the image.
             -m, --memory MEM        Sets memory limit for the build container.
             --build-arg key=val     Set build-time variables for services.
             --parallel              Build images in parallel.
+            -q, --quiet             Don't print anything to STDOUT
         """
         service_names = options['SERVICE']
         build_args = options.get('--build-arg', None)

@@ -273,7 +276,8 @@ class TopLevelCommand(object):
                     '--build-arg is only supported when services are specified for API version < 1.25.'
                     ' Please use a Compose file version > 2.2 or specify which services to build.'
                 )
-            environment = Environment.from_env_file(self.project_dir)
+            environment_file = options.get('--env-file')
+            environment = Environment.from_env_file(self.project_dir, environment_file)
             build_args = resolve_build_args(build_args, environment)
 
         self.project.build(

@@ -282,9 +286,11 @@ class TopLevelCommand(object):
             pull=bool(options.get('--pull', False)),
             force_rm=bool(options.get('--force-rm', False)),
             memory=options.get('--memory'),
+            rm=not bool(options.get('--no-rm', False)),
             build_args=build_args,
             gzip=options.get('--compress', False),
             parallel_build=options.get('--parallel', False),
+            silent=options.get('--quiet', False)
         )
 
     def bundle(self, options):

@@ -327,6 +333,7 @@ class TopLevelCommand(object):
 
         Options:
             --resolve-image-digests  Pin image tags to digests.
+            --no-interpolate         Don't interpolate environment variables
             -q, --quiet              Only validate the configuration, don't print
                                      anything.
             --services               Print the service names, one per line.

@@ -336,11 +343,12 @@ class TopLevelCommand(object):
                                      or use the wildcard symbol to display all services
         """
 
-        compose_config = get_config_from_options('.', self.toplevel_options)
+        additional_options = {'--no-interpolate': options.get('--no-interpolate')}
+        compose_config = get_config_from_options('.', self.toplevel_options, additional_options)
         image_digests = None
 
         if options['--resolve-image-digests']:
-            self.project = project_from_options('.', self.toplevel_options)
+            self.project = project_from_options('.', self.toplevel_options, additional_options)
             with errors.handle_connection_errors(self.project.client):
                 image_digests = image_digests_for_project(self.project)

@@ -357,14 +365,14 @@ class TopLevelCommand(object):
 
         if options['--hash'] is not None:
             h = options['--hash']
-            self.project = project_from_options('.', self.toplevel_options)
+            self.project = project_from_options('.', self.toplevel_options, additional_options)
             services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
             with errors.handle_connection_errors(self.project.client):
                 for service in self.project.get_services(services):
                     print('{} {}'.format(service.name, service.config_hash))
             return
 
-        print(serialize_config(compose_config, image_digests))
+        print(serialize_config(compose_config, image_digests, not options['--no-interpolate']))
 
     def create(self, options):
         """

@@ -383,7 +391,7 @@ class TopLevelCommand(object):
         """
         service_names = options['SERVICE']
 
-        log.warn(
+        log.warning(
             'The create command is deprecated. '
             'Use the up command with the --no-start flag instead.'
         )

@@ -421,8 +429,10 @@ class TopLevelCommand(object):
                                         Compose file
             -t, --timeout TIMEOUT   Specify a shutdown timeout in seconds.
                                     (default: 10)
+            --env-file PATH         Specify an alternate environment file
         """
-        environment = Environment.from_env_file(self.project_dir)
+        environment_file = options.get('--env-file')
+        environment = Environment.from_env_file(self.project_dir, environment_file)
         ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
 
         if ignore_orphans and options['--remove-orphans']:

@@ -479,8 +489,10 @@ class TopLevelCommand(object):
             -e, --env KEY=VAL Set environment variables (can be used multiple times,
                               not supported in API < 1.25)
             -w, --workdir DIR Path to workdir directory for this command.
+            --env-file PATH   Specify an alternate environment file
         """
-        environment = Environment.from_env_file(self.project_dir)
+        environment_file = options.get('--env-file')
+        environment = Environment.from_env_file(self.project_dir, environment_file)
         use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
         index = int(options.get('--index'))
         service = self.project.get_service(options['SERVICE'])

@@ -709,7 +721,8 @@ class TopLevelCommand(object):
 
         if options['--all']:
             containers = sorted(self.project.containers(service_names=options['SERVICE'],
-                                one_off=OneOffFilter.include, stopped=True))
+                                one_off=OneOffFilter.include, stopped=True),
+                                key=attrgetter('name'))
         else:
             containers = sorted(
                 self.project.containers(service_names=options['SERVICE'], stopped=True) +

@@ -753,7 +766,7 @@ class TopLevelCommand(object):
             --include-deps          Also pull services declared as dependencies
         """
         if options.get('--parallel'):
-            log.warn('--parallel option is deprecated and will be removed in future versions.')
+            log.warning('--parallel option is deprecated and will be removed in future versions.')
         self.project.pull(
             service_names=options['SERVICE'],
             ignore_pull_failures=options.get('--ignore-pull-failures'),

@@ -794,7 +807,7 @@ class TopLevelCommand(object):
             -a, --all     Deprecated - no effect.
         """
         if options.get('--all'):
-            log.warn(
+            log.warning(
                 '--all flag is obsolete. This is now the default behavior '
                 'of `docker-compose rm`'
             )

@@ -904,7 +917,7 @@ class TopLevelCommand(object):
                 'Use the up command with the --scale flag instead.'
             )
         else:
-            log.warn(
+            log.warning(
                 'The scale command is deprecated. '
                 'Use the up command with the --scale flag instead.'
             )

@@ -1036,6 +1049,7 @@ class TopLevelCommand(object):
                                        container. Implies --abort-on-container-exit.
             --scale SERVICE=NUM        Scale SERVICE to NUM instances. Overrides the
                                        `scale` setting in the Compose file if present.
+            --env-file PATH            Specify an alternate environment file
         """
         start_deps = not options['--no-deps']
         always_recreate_deps = options['--always-recreate-deps']

@@ -1050,7 +1064,8 @@ class TopLevelCommand(object):
         if detached and (cascade_stop or exit_value_from):
             raise UserError("--abort-on-container-exit and -d cannot be combined.")
 
-        environment = Environment.from_env_file(self.project_dir)
+        environment_file = options.get('--env-file')
+        environment = Environment.from_env_file(self.project_dir, environment_file)
         ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
 
         if ignore_orphans and remove_orphans:

@@ -1236,7 +1251,7 @@ def exitval_from_opts(options, project):
     exit_value_from = options.get('--exit-code-from')
     if exit_value_from:
         if not options.get('--abort-on-container-exit'):
-            log.warn('using --exit-code-from implies --abort-on-container-exit')
+            log.warning('using --exit-code-from implies --abort-on-container-exit')
             options['--abort-on-container-exit'] = True
         if exit_value_from not in [s.name for s in project.get_services()]:
             log.error('No service named "%s" was found in your compose file.',

@@ -1343,7 +1358,8 @@ def run_one_off_container(container_options, project, service, options, toplevel
         if options['--rm']:
             project.client.remove_container(container.id, force=True, v=True)
 
-    environment = Environment.from_env_file(project_dir)
+    environment_file = options.get('--env-file')
+    environment = Environment.from_env_file(project_dir, environment_file)
     use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
 
     signals.set_signal_handler_to_shutdown()

@@ -1565,7 +1581,7 @@ def warn_for_swarm_mode(client):
             # UCP does multi-node scheduling with traditional Compose files.
             return
 
-        log.warn(
+        log.warning(
             "The Docker Engine you're using is running in swarm mode.\n\n"
             "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
             "All containers will be scheduled on the current node.\n\n"

compose/config/config.py

@@ -198,9 +198,9 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
         version = self.config['version']
 
         if isinstance(version, dict):
-            log.warn('Unexpected type for "version" key in "{}". Assuming '
-                     '"version" is the name of a service, and defaulting to '
-                     'Compose file version 1.'.format(self.filename))
+            log.warning('Unexpected type for "version" key in "{}". Assuming '
+                        '"version" is the name of a service, and defaulting to '
+                        'Compose file version 1.'.format(self.filename))
             return V1
 
         if not isinstance(version, six.string_types):

@@ -318,8 +318,8 @@ def get_default_config_files(base_dir):
     winner = candidates[0]
 
     if len(candidates) > 1:
-        log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
-        log.warn("Using %s\n", winner)
+        log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
+        log.warning("Using %s\n", winner)
 
     return [os.path.join(path, winner)] + get_default_override_file(path)

@@ -362,7 +362,7 @@ def check_swarm_only_config(service_dicts, compatibility=False):
     def check_swarm_only_key(service_dicts, key):
         services = [s for s in service_dicts if s.get(key)]
         if services:
-            log.warn(
+            log.warning(
                 warning_template.format(
                     services=", ".join(sorted(s['name'] for s in services)),
                     key=key

@@ -373,7 +373,7 @@ def check_swarm_only_config(service_dicts, compatibility=False):
         check_swarm_only_key(service_dicts, 'configs')
 
 
-def load(config_details, compatibility=False):
+def load(config_details, compatibility=False, interpolate=True):
     """Load the configuration from a working directory and a list of
     configuration files.  Files are loaded in order, and merged on top
     of each other to create the final configuration.

@@ -383,7 +383,7 @@ def load(config_details, compatibility=False):
     validate_config_version(config_details.config_files)
 
     processed_files = [
-        process_config_file(config_file, config_details.environment)
+        process_config_file(config_file, config_details.environment, interpolate=interpolate)
         for config_file in config_details.config_files
     ]
     config_details = config_details._replace(config_files=processed_files)

@@ -505,7 +505,6 @@ def load_services(config_details, config_file, compatibility=False):
 
 
 def interpolate_config_section(config_file, config, section, environment):
-    validate_config_section(config_file.filename, config, section)
     return interpolate_environment_variables(
         config_file.version,
         config,

@@ -514,38 +513,60 @@ def interpolate_config_section(config_file, config, section, environment):
     )
 
 
-def process_config_file(config_file, environment, service_name=None):
-    services = interpolate_config_section(
+def process_config_section(config_file, config, section, environment, interpolate):
+    validate_config_section(config_file.filename, config, section)
+    if interpolate:
+        return interpolate_environment_variables(
+            config_file.version,
+            config,
+            section,
+            environment
+        )
+    else:
+        return config
+
+
+def process_config_file(config_file, environment, service_name=None, interpolate=True):
+    services = process_config_section(
         config_file,
         config_file.get_service_dicts(),
         'service',
-        environment)
+        environment,
+        interpolate,
+    )
 
     if config_file.version > V1:
         processed_config = dict(config_file.config)
         processed_config['services'] = services
-        processed_config['volumes'] = interpolate_config_section(
+        processed_config['volumes'] = process_config_section(
             config_file,
             config_file.get_volumes(),
             'volume',
-            environment)
-        processed_config['networks'] = interpolate_config_section(
+            environment,
+            interpolate,
+        )
+        processed_config['networks'] = process_config_section(
             config_file,
             config_file.get_networks(),
             'network',
-            environment)
+            environment,
+            interpolate,
+        )
         if config_file.version >= const.COMPOSEFILE_V3_1:
-            processed_config['secrets'] = interpolate_config_section(
+            processed_config['secrets'] = process_config_section(
                 config_file,
                 config_file.get_secrets(),
                 'secret',
-                environment)
+                environment,
+                interpolate,
+            )
        if config_file.version >= const.COMPOSEFILE_V3_3:
-            processed_config['configs'] = interpolate_config_section(
+            processed_config['configs'] = process_config_section(
                 config_file,
                 config_file.get_configs(),
                 'config',
-                environment
+                environment,
+                interpolate,
             )
     else:
         processed_config = services

@@ -900,7 +921,7 @@ def finalize_service(service_config, service_names, version, environment, compat
             service_dict
         )
         if ignored_keys:
-            log.warn(
+            log.warning(
                 'The following deploy sub-keys are not supported in compatibility mode and have'
                 ' been ignored: {}'.format(', '.join(ignored_keys))
             )

compose/config/environment.py

@@ -56,14 +56,18 @@ class Environment(dict):
     def __init__(self, *args, **kwargs):
         super(Environment, self).__init__(*args, **kwargs)
         self.missing_keys = []
+        self.silent = False
 
     @classmethod
-    def from_env_file(cls, base_dir):
+    def from_env_file(cls, base_dir, env_file=None):
         def _initialize():
             result = cls()
             if base_dir is None:
                 return result
-            env_file_path = os.path.join(base_dir, '.env')
+            if env_file:
+                env_file_path = os.path.join(base_dir, env_file)
+            else:
+                env_file_path = os.path.join(base_dir, '.env')
             try:
                 return cls(env_vars_from_file(env_file_path))
             except EnvFileNotFound:

@@ -95,8 +99,8 @@ class Environment(dict):
             return super(Environment, self).__getitem__(key.upper())
         except KeyError:
             pass
-        if key not in self.missing_keys:
-            log.warn(
+        if not self.silent and key not in self.missing_keys:
+            log.warning(
                 "The {} variable is not set. Defaulting to a blank string."
                 .format(key)
             )
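The new env_file argument is still resolved relative to base_dir, and the default is unchanged. A two-line sketch ('.env.production' is a hypothetical file name):

    from compose.config.environment import Environment

    env = Environment.from_env_file('.', '.env.production')  # reads ./.env.production
    env = Environment.from_env_file('.')                     # default as before: ./.env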

compose/config/serialize.py

@@ -24,14 +24,12 @@ def serialize_dict_type(dumper, data):
 
 def serialize_string(dumper, data):
-    """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+    """ Ensure boolean-like strings are quoted in the output """
     representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
 
     if isinstance(data, six.binary_type):
         data = data.decode('utf-8')
 
-    data = data.replace('$', '$$')
-
     if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
         # Empirically only y/n appears to be an issue, but this might change
         # depending on which PyYaml version is being used. Err on safe side.

@@ -39,6 +37,12 @@ def serialize_string(dumper, data):
     return representer(data)
 
 
+def serialize_string_escape_dollar(dumper, data):
+    """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+    data = data.replace('$', '$$')
+    return serialize_string(dumper, data)
+
+
 yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
 yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)

@@ -46,8 +50,6 @@ yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
 yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
-yaml.SafeDumper.add_representer(str, serialize_string)
-yaml.SafeDumper.add_representer(six.text_type, serialize_string)
 
 
 def denormalize_config(config, image_digests=None):

@@ -93,7 +95,13 @@ def v3_introduced_name_key(key):
     return V3_5
 
 
-def serialize_config(config, image_digests=None):
+def serialize_config(config, image_digests=None, escape_dollar=True):
+    if escape_dollar:
+        yaml.SafeDumper.add_representer(str, serialize_string_escape_dollar)
+        yaml.SafeDumper.add_representer(six.text_type, serialize_string_escape_dollar)
+    else:
+        yaml.SafeDumper.add_representer(str, serialize_string)
+        yaml.SafeDumper.add_representer(six.text_type, serialize_string)
     return yaml.safe_dump(
         denormalize_config(config, image_digests),
         default_flow_style=False,
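The two representers differ only in the '$' pass. Doubling keeps a literal dollar literal through a later interpolation; with --no-interpolate the variables themselves must survive the dump, so the escaping is switched off:

    print('image: ${IMAGE}'.replace('$', '$$'))  # escape_dollar=True  -> image: $${IMAGE}
    print('image: ${IMAGE}')                     # escape_dollar=False -> image: ${IMAGE}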

compose/network.py

@@ -231,7 +231,7 @@ def check_remote_network_config(remote, local):
         if k.startswith('com.docker.'):  # We are only interested in user-specified labels
             continue
         if remote_labels.get(k) != local_labels.get(k):
-            log.warn(
+            log.warning(
                 'Network {}: label "{}" has changed. It may need to be'
                 ' recreated.'.format(local.true_name, k)
             )

@@ -276,7 +276,7 @@ class ProjectNetworks(object):
         }
         unused = set(networks) - set(service_networks) - {'default'}
         if unused:
-            log.warn(
+            log.warning(
                 "Some networks were defined but are not used by any service: "
                 "{}".format(", ".join(unused)))
         return cls(service_networks, use_networking)

@@ -288,7 +288,7 @@ class ProjectNetworks(object):
             try:
                 network.remove()
             except NotFound:
-                log.warn("Network %s not found.", network.true_name)
+                log.warning("Network %s not found.", network.true_name)
 
     def initialize(self):
         if not self.use_networking:

compose/project.py

@@ -355,18 +355,17 @@ class Project(object):
         return containers
 
     def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
-              build_args=None, gzip=False, parallel_build=False):
+              build_args=None, gzip=False, parallel_build=False, rm=True, silent=False):
 
         services = []
         for service in self.get_services(service_names):
             if service.can_be_built():
                 services.append(service)
-            else:
+            elif not silent:
                 log.info('%s uses an image, skipping' % service.name)
 
         def build_service(service):
-            service.build(no_cache, pull, force_rm, memory, build_args, gzip)
+            service.build(no_cache, pull, force_rm, memory, build_args, gzip, rm, silent)
 
         if parallel_build:
             _, errors = parallel.parallel_execute(
                 services,

@@ -587,8 +586,10 @@ class Project(object):
                              ", ".join(updated_dependencies))
                 containers_stopped = any(
                     service.containers(stopped=True, filters={'status': ['created', 'exited']}))
-                has_links = any(c.get('HostConfig.Links') for c in service.containers())
-                if always_recreate_deps or containers_stopped or not has_links:
+                service_has_links = any(service.get_link_names())
+                container_has_links = any(c.get('HostConfig.Links') for c in service.containers())
+                should_recreate_for_links = service_has_links ^ container_has_links
+                if always_recreate_deps or containers_stopped or should_recreate_for_links:
                     plan = service.convergence_plan(ConvergenceStrategy.always)
                 else:
                     plan = service.convergence_plan(strategy)

@@ -602,6 +603,9 @@ class Project(object):
     def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
              include_deps=False):
         services = self.get_services(service_names, include_deps)
+        images_to_build = {service.image_name for service in services if service.can_be_built()}
+        services_to_pull = [service for service in services if service.image_name not in images_to_build]
+
         msg = not silent and 'Pulling' or None
 
         if parallel_pull:

@@ -627,7 +631,7 @@ class Project(object):
             )
 
             _, errors = parallel.parallel_execute(
-                services,
+                services_to_pull,
                 pull_service,
                 operator.attrgetter('name'),
                 msg,

@@ -640,7 +644,7 @@ class Project(object):
                 raise ProjectError(combined_errors)
 
         else:
-            for service in services:
+            for service in services_to_pull:
                 service.pull(ignore_pull_failures, silent=silent)
 
     def push(self, service_names=None, ignore_push_failures=False):

@@ -686,7 +690,7 @@ class Project(object):
     def find_orphan_containers(self, remove_orphans):
         def _find():
-            containers = self._labeled_containers()
+            containers = set(self._labeled_containers() + self._labeled_containers(stopped=True))
             for ctnr in containers:
                 service_name = ctnr.labels.get(LABEL_SERVICE)
                 if service_name not in self.service_names:

@@ -697,7 +701,10 @@ class Project(object):
         if remove_orphans:
             for ctnr in orphans:
                 log.info('Removing orphan container "{0}"'.format(ctnr.name))
-                ctnr.kill()
+                try:
+                    ctnr.kill()
+                except APIError:
+                    pass
                 ctnr.remove(force=True)
         else:
             log.warning(

@@ -725,10 +732,11 @@ class Project(object):
     def build_container_operation_with_timeout_func(self, operation, options):
         def container_operation_with_timeout(container):
-            if options.get('timeout') is None:
+            _options = options.copy()
+            if _options.get('timeout') is None:
                 service = self.get_service(container.service)
-                options['timeout'] = service.stop_timeout(None)
-            return getattr(container, operation)(**options)
+                _options['timeout'] = service.stop_timeout(None)
+            return getattr(container, operation)(**_options)
         return container_operation_with_timeout

@@ -771,13 +779,13 @@ def get_secrets(service, service_secrets, secret_defs):
                 .format(service=service, secret=secret.source))
 
         if secret_def.get('external'):
-            log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
-                     "External secrets are not available to containers created by "
-                     "docker-compose.".format(service=service, secret=secret.source))
+            log.warning("Service \"{service}\" uses secret \"{secret}\" which is external. "
+                        "External secrets are not available to containers created by "
+                        "docker-compose.".format(service=service, secret=secret.source))
             continue
 
         if secret.uid or secret.gid or secret.mode:
-            log.warn(
+            log.warning(
                 "Service \"{service}\" uses secret \"{secret}\" with uid, "
                 "gid, or mode. These fields are not supported by this "
                 "implementation of the Compose file".format(

compose/service.py

@ -59,7 +59,6 @@ from .utils import parse_seconds_float
from .utils import truncate_id from .utils import truncate_id
from .utils import unique_everseen from .utils import unique_everseen
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -177,7 +176,7 @@ class Service(object):
network_mode=None, network_mode=None,
networks=None, networks=None,
secrets=None, secrets=None,
scale=None, scale=1,
pid_mode=None, pid_mode=None,
default_platform=None, default_platform=None,
**options **options
@ -192,7 +191,7 @@ class Service(object):
self.pid_mode = pid_mode or PidMode(None) self.pid_mode = pid_mode or PidMode(None)
self.networks = networks or {} self.networks = networks or {}
self.secrets = secrets or [] self.secrets = secrets or []
self.scale_num = scale or 1 self.scale_num = scale
self.default_platform = default_platform self.default_platform = default_platform
self.options = options self.options = options
@ -241,15 +240,15 @@ class Service(object):
def show_scale_warnings(self, desired_num): def show_scale_warnings(self, desired_num):
if self.custom_container_name and desired_num > 1: if self.custom_container_name and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". ' log.warning('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. ' 'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.' 'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name)) % (self.name, self.custom_container_name))
if self.specifies_host_port() and desired_num > 1: if self.specifies_host_port() and desired_num > 1:
log.warn('The "%s" service specifies a port on the host. If multiple containers ' log.warning('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.' 'for this service are created on a single host, the port will clash.'
% self.name) % self.name)
def scale(self, desired_num, timeout=None): def scale(self, desired_num, timeout=None):
""" """
@ -358,11 +357,17 @@ class Service(object):
raise NeedsBuildError(self) raise NeedsBuildError(self)
self.build() self.build()
log.warn( log.warning(
"Image for service {} was built because it did not already exist. To " "Image for service {} was built because it did not already exist. To "
"rebuild this image you must use `docker-compose build` or " "rebuild this image you must use `docker-compose build` or "
"`docker-compose up --build`.".format(self.name)) "`docker-compose up --build`.".format(self.name))
def get_image_registry_data(self):
try:
return self.client.inspect_distribution(self.image_name)
except APIError:
raise NoSuchImageError("Image '{}' not found".format(self.image_name))
def image(self): def image(self):
try: try:
return self.client.inspect_image(self.image_name) return self.client.inspect_image(self.image_name)
@ -680,6 +685,7 @@ class Service(object):
'links': self.get_link_names(), 'links': self.get_link_names(),
'net': self.network_mode.id, 'net': self.network_mode.id,
'networks': self.networks, 'networks': self.networks,
'secrets': self.secrets,
'volumes_from': [ 'volumes_from': [
(v.source.name, v.mode) (v.source.name, v.mode)
for v in self.volumes_from if isinstance(v.source, Service) for v in self.volumes_from if isinstance(v.source, Service)
@ -1043,8 +1049,11 @@ class Service(object):
return [build_spec(secret) for secret in self.secrets] return [build_spec(secret) for secret in self.secrets]
def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None, def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None,
gzip=False): gzip=False, rm=True, silent=False):
log.info('Building %s' % self.name) output_stream = open(os.devnull, 'w')
if not silent:
output_stream = sys.stdout
log.info('Building %s' % self.name)
build_opts = self.options.get('build', {}) build_opts = self.options.get('build', {})
@ -1064,12 +1073,12 @@ class Service(object):
build_output = self.client.build( build_output = self.client.build(
path=path, path=path,
tag=self.image_name, tag=self.image_name,
rm=True, rm=rm,
forcerm=force_rm, forcerm=force_rm,
pull=pull, pull=pull,
nocache=no_cache, nocache=no_cache,
dockerfile=build_opts.get('dockerfile', None), dockerfile=build_opts.get('dockerfile', None),
cache_from=build_opts.get('cache_from', None), cache_from=self.get_cache_from(build_opts),
labels=build_opts.get('labels', None), labels=build_opts.get('labels', None),
buildargs=build_args, buildargs=build_args,
network_mode=build_opts.get('network', None), network_mode=build_opts.get('network', None),
@ -1085,7 +1094,7 @@ class Service(object):
) )
try: try:
all_events = list(stream_output(build_output, sys.stdout)) all_events = list(stream_output(build_output, output_stream))
except StreamOutputError as e: except StreamOutputError as e:
raise BuildError(self, six.text_type(e)) raise BuildError(self, six.text_type(e))
@ -1107,6 +1116,12 @@ class Service(object):
return image_id return image_id
def get_cache_from(self, build_opts):
cache_from = build_opts.get('cache_from', None)
if cache_from is not None:
cache_from = [tag for tag in cache_from if tag]
return cache_from
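This small filter implements the changelog's "Drops empty tag on `build:cache_from`": falsy entries (for example an unset variable that interpolated to an empty string) are discarded before the list reaches the builder. Illustrative values:

    build_opts = {'cache_from': ['myimage:stable', '', None]}  # illustrative
    cache_from = build_opts.get('cache_from', None)
    if cache_from is not None:
        cache_from = [tag for tag in cache_from if tag]
    print(cache_from)  # ['myimage:stable']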
def can_be_built(self): def can_be_built(self):
return 'build' in self.options return 'build' in self.options
@ -1316,7 +1331,7 @@ class ServicePidMode(PidMode):
if containers: if containers:
return 'container:' + containers[0].id return 'container:' + containers[0].id
log.warn( log.warning(
"Service %s is trying to use reuse the PID namespace " "Service %s is trying to use reuse the PID namespace "
"of another service that is not running." % (self.service_name) "of another service that is not running." % (self.service_name)
) )
@ -1379,8 +1394,8 @@ class ServiceNetworkMode(object):
if containers: if containers:
return 'container:' + containers[0].id return 'container:' + containers[0].id
log.warn("Service %s is trying to use reuse the network stack " log.warning("Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.id)) "of another service that is not running." % (self.id))
return None return None
@ -1531,7 +1546,7 @@ def warn_on_masked_volume(volumes_option, container_volumes, service):
volume.internal in container_volumes and volume.internal in container_volumes and
container_volumes.get(volume.internal) != volume.external container_volumes.get(volume.internal) != volume.external
): ):
log.warn(( log.warning((
"Service \"{service}\" is using volume \"{volume}\" from the " "Service \"{service}\" is using volume \"{volume}\" from the "
"previous container. Host mapping \"{host_path}\" has no effect. " "previous container. Host mapping \"{host_path}\" has no effect. "
"Remove the existing containers (with `docker-compose rm {service}`) " "Remove the existing containers (with `docker-compose rm {service}`) "


@ -127,7 +127,7 @@ class ProjectVolumes(object):
try: try:
volume.remove() volume.remove()
except NotFound: except NotFound:
log.warn("Volume %s not found.", volume.true_name) log.warning("Volume %s not found.", volume.true_name)
def initialize(self): def initialize(self):
try: try:
@ -209,7 +209,7 @@ def check_remote_volume_config(remote, local):
if k.startswith('com.docker.'): # We are only interested in user-specified labels if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue continue
if remote_labels.get(k) != local_labels.get(k): if remote_labels.get(k) != local_labels.get(k):
log.warn( log.warning(
'Volume {}: label "{}" has changed. It may need to be' 'Volume {}: label "{}" has changed. It may need to be'
' recreated.'.format(local.name, k) ' recreated.'.format(local.name, k)
) )


@ -110,11 +110,14 @@ _docker_compose_build() {
__docker_compose_nospace __docker_compose_nospace
return return
;; ;;
--memory|-m)
return
;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull --parallel" -- "$cur" ) ) COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory -m --no-cache --no-rm --pull --parallel -q --quiet" -- "$cur" ) )
;; ;;
*) *)
__docker_compose_complete_services --filter source=build __docker_compose_complete_services --filter source=build


@ -113,6 +113,7 @@ __docker-compose_subcommand() {
$opts_help \ $opts_help \
"*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \ "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
'--force-rm[Always remove intermediate containers.]' \ '--force-rm[Always remove intermediate containers.]' \
'(--quiet -q)'{--quiet,-q}'[Curb build output]' \
'(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \ '(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \
'--no-cache[Do not use cache when building the image.]' \ '--no-cache[Do not use cache when building the image.]' \
'--pull[Always attempt to pull a newer version of the image.]' \ '--pull[Always attempt to pull a newer version of the image.]' \


@ -44,7 +44,7 @@ def warn_for_links(name, service):
links = service.get('links') links = service.get('links')
if links: if links:
example_service = links[0].partition(':')[0] example_service = links[0].partition(':')[0]
log.warn( log.warning(
"Service {name} has links, which no longer create environment " "Service {name} has links, which no longer create environment "
"variables such as {example_service_upper}_PORT. " "variables such as {example_service_upper}_PORT. "
"If you are using those in your application code, you should " "If you are using those in your application code, you should "
@ -57,7 +57,7 @@ def warn_for_links(name, service):
def warn_for_external_links(name, service): def warn_for_external_links(name, service):
external_links = service.get('external_links') external_links = service.get('external_links')
if external_links: if external_links:
log.warn( log.warning(
"Service {name} has external_links: {ext}, which now work " "Service {name} has external_links: {ext}, which now work "
"slightly differently. In particular, two containers must be " "slightly differently. In particular, two containers must be "
"connected to at least one network in common in order to " "connected to at least one network in common in order to "
@ -107,7 +107,7 @@ def rewrite_volumes_from(service, service_names):
def create_volumes_section(data): def create_volumes_section(data):
named_volumes = get_named_volumes(data['services']) named_volumes = get_named_volumes(data['services'])
if named_volumes: if named_volumes:
log.warn( log.warning(
"Named volumes ({names}) must be explicitly declared. Creating a " "Named volumes ({names}) must be explicitly declared. Creating a "
"'volumes' section with declarations.\n\n" "'volumes' section with declarations.\n\n"
"For backwards-compatibility, they've been declared as external. " "For backwards-compatibility, they've been declared as external. "

docker-compose-entrypoint.sh Executable file

@ -0,0 +1,20 @@
#!/bin/sh
set -e
# first arg is `-f` or `--some-option`
if [ "${1#-}" != "$1" ]; then
set -- docker-compose "$@"
fi
# if our command is a valid Docker subcommand, let's invoke it through Docker instead
# (this allows for "docker run docker ps", etc)
if docker-compose help "$1" > /dev/null 2>&1; then
set -- docker-compose "$@"
fi
# if we have "--link some-docker:docker" and not DOCKER_HOST, let's set DOCKER_HOST automatically
if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then
export DOCKER_HOST='tcp://docker:2375'
fi
exec "$@"


@ -6,11 +6,9 @@ The documentation for Compose has been merged into
The docs for Compose are now here: The docs for Compose are now here:
https://github.com/docker/docker.github.io/tree/master/compose https://github.com/docker/docker.github.io/tree/master/compose
Please submit pull requests for unpublished features on the `vnext-compose` branch (https://github.com/docker/docker.github.io/tree/vnext-compose). Please submit pull requests for unreleased features/changes on the `master` branch (https://github.com/docker/docker.github.io/tree/master). Please prefix the PR title with `[WIP]` to indicate that it relates to an unreleased change.
If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided (coming soon - watch this space). If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided.
PRs for typos, additional information, etc. for already-published features should be labeled as `okay-to-publish` (we are still settling on a naming convention, will provide a label soon). You can submit these PRs either to `vnext-compose` or directly to `master` on `docker.github.io`
As always, the docs remain open-source and we appreciate your feedback and As always, the docs remain open-source and we appreciate your feedback and
pull requests! pull requests!

pyinstaller/ldd Executable file

@ -0,0 +1,13 @@
#!/bin/sh
# From http://wiki.musl-libc.org/wiki/FAQ#Q:_where_is_ldd_.3F
#
# Musl's dynlinker comes with ldd functionality built in. Just create a
# symlink from ld-musl-$ARCH.so to /bin/ldd. If the dynlinker was started
# as "ldd", it will detect that and print the appropriate DSO information.
#
# Here, instead, the bare "ldd" in that output is replaced with the library
# path so that pyinstaller can find the actual lib.
exec /usr/bin/ldd "$@" | \
sed -r 's/([^[:space:]]+) => ldd/\1 => \/lib\/\1/g' | \
sed -r 's/ldd \(.*\)//g'
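The two sed passes rewrite musl's ldd output so that pyinstaller resolves each DSO to a real path under /lib. A rough Python rendering of the same rewrite, with an illustrative input line:

    import re

    line = 'libz.so.1 => ldd (0x7ff0000)'  # illustrative musl ldd output
    line = re.sub(r'(\S+) => ldd', r'\1 => /lib/\1', line)
    line = re.sub(r'ldd \(.*\)', '', line)
    print(line)  # libz.so.1 => /lib/libz.so.1 (0x7ff0000)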


@ -1 +1 @@
pyinstaller==3.3.1 pyinstaller==3.4


@ -1,4 +1,5 @@
coverage==4.4.2 coverage==4.4.2
ddt==1.2.0
flake8==3.5.0 flake8==3.5.0
mock==2.0.0 mock==2.0.0
pytest==3.6.3 pytest==3.6.3


@ -3,7 +3,7 @@ cached-property==1.3.0
certifi==2017.4.17 certifi==2017.4.17
chardet==3.0.4 chardet==3.0.4
colorama==0.4.0; sys_platform == 'win32' colorama==0.4.0; sys_platform == 'win32'
docker==3.7.2 docker==4.0.1
docker-pycreds==0.4.0 docker-pycreds==0.4.0
dockerpty==0.4.1 dockerpty==0.4.1
docopt==0.6.2 docopt==0.6.2
@ -17,8 +17,8 @@ pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6' pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
PySocks==1.6.7 PySocks==1.6.7
PyYAML==4.2b1 PyYAML==4.2b1
requests==2.20.0 requests==2.22.0
six==1.10.0 six==1.10.0
texttable==0.9.1 texttable==0.9.1
urllib3==1.21.1; python_version == '3.3' urllib3==1.24.2; python_version == '3.3'
websocket-client==0.32.0 websocket-client==0.32.0


@ -7,11 +7,14 @@ if [ -z "$1" ]; then
exit 1 exit 1
fi fi
TAG=$1 TAG="$1"
VERSION="$(python setup.py --version)" VERSION="$(python setup.py --version)"
./script/build/write-git-sha DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
python setup.py sdist bdist_wheel python setup.py sdist bdist_wheel
./script/build/linux
docker build -t docker/compose:$TAG -f Dockerfile.run . docker build \
--build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}" \
-t "${TAG}" .


@ -4,10 +4,14 @@ set -ex
./script/clean ./script/clean
TAG="docker-compose" DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
docker build -t "$TAG" . TAG="docker/compose:tmp-glibc-linux-binary-${DOCKER_COMPOSE_GITSHA}"
docker run \
--rm --entrypoint="script/build/linux-entrypoint" \ docker build -t "${TAG}" . \
-v $(pwd)/dist:/code/dist \ --build-arg BUILD_PLATFORM=debian \
-v $(pwd)/.git:/code/.git \ --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
"$TAG" TMP_CONTAINER=$(docker create "${TAG}")
mkdir -p dist
docker cp "${TMP_CONTAINER}":/usr/local/bin/docker-compose dist/docker-compose-Linux-x86_64
docker container rm -f "${TMP_CONTAINER}"
docker image rm -f "${TAG}"


@ -2,14 +2,38 @@
set -ex set -ex
TARGET=dist/docker-compose-$(uname -s)-$(uname -m) CODE_PATH=/code
VENV=/code/.tox/py36 VENV="${CODE_PATH}"/.tox/py37
mkdir -p `pwd`/dist cd "${CODE_PATH}"
chmod 777 `pwd`/dist mkdir -p dist
chmod 777 dist
$VENV/bin/pip install -q -r requirements-build.txt "${VENV}"/bin/pip3 install -q -r requirements-build.txt
./script/build/write-git-sha
su -c "$VENV/bin/pyinstaller docker-compose.spec" user # TODO(ulyssessouza): check if this is really needed
mv dist/docker-compose $TARGET if [ -z "${DOCKER_COMPOSE_GITSHA}" ]; then
$TARGET version DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
fi
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
export PATH="${CODE_PATH}/pyinstaller:${PATH}"
if [ ! -z "${BUILD_BOOTLOADER}" ]; then
# Build bootloader for alpine
git clone --single-branch --branch master https://github.com/pyinstaller/pyinstaller.git /tmp/pyinstaller
cd /tmp/pyinstaller/bootloader
git checkout v3.4
"${VENV}"/bin/python3 ./waf configure --no-lsb all
"${VENV}"/bin/pip3 install ..
cd "${CODE_PATH}"
rm -Rf /tmp/pyinstaller
else
echo "NOT compiling bootloader!!!"
fi
"${VENV}"/bin/pyinstaller --exclude-module pycrypto --exclude-module PyInstaller docker-compose.spec
ls -la dist/
ldd dist/docker-compose
mv dist/docker-compose /usr/local/bin
docker-compose version


@ -5,11 +5,12 @@ TOOLCHAIN_PATH="$(realpath $(dirname $0)/../../build/toolchain)"
rm -rf venv rm -rf venv
virtualenv -p ${TOOLCHAIN_PATH}/bin/python3 venv virtualenv -p "${TOOLCHAIN_PATH}"/bin/python3 venv
venv/bin/pip install -r requirements.txt venv/bin/pip install -r requirements.txt
venv/bin/pip install -r requirements-build.txt venv/bin/pip install -r requirements-build.txt
venv/bin/pip install --no-deps . venv/bin/pip install --no-deps .
./script/build/write-git-sha DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
venv/bin/pyinstaller docker-compose.spec venv/bin/pyinstaller docker-compose.spec
mv dist/docker-compose dist/docker-compose-Darwin-x86_64 mv dist/docker-compose dist/docker-compose-Darwin-x86_64
dist/docker-compose-Darwin-x86_64 version dist/docker-compose-Darwin-x86_64 version


@ -7,11 +7,12 @@ if [ -z "$1" ]; then
exit 1 exit 1
fi fi
TAG=$1 TAG="$1"
IMAGE="docker/compose-tests"
docker build -t docker-compose-tests:tmp . DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
ctnr_id=$(docker create --entrypoint=tox docker-compose-tests:tmp) docker build -t "${IMAGE}:${TAG}" . \
docker commit $ctnr_id docker/compose-tests:latest --target build \
docker tag docker/compose-tests:latest docker/compose-tests:$TAG --build-arg BUILD_PLATFORM="debian" \
docker rm -f $ctnr_id --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
docker rmi -f docker-compose-tests:tmp docker tag "${IMAGE}":"${TAG}" "${IMAGE}":latest


@ -6,17 +6,17 @@
# #
# http://git-scm.com/download/win # http://git-scm.com/download/win
# #
# 2. Install Python 3.6.4: # 2. Install Python 3.7.2:
# #
# https://www.python.org/downloads/ # https://www.python.org/downloads/
# #
# 3. Append ";C:\Python36;C:\Python36\Scripts" to the "Path" environment variable: # 3. Append ";C:\Python37;C:\Python37\Scripts" to the "Path" environment variable:
# #
# https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true # https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true
# #
# 4. In Powershell, run the following commands: # 4. In Powershell, run the following commands:
# #
# $ pip install 'virtualenv>=15.1.0' # $ pip install 'virtualenv==16.2.0'
# $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned # $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned
# #
# 5. Clone the repository: # 5. Clone the repository:


@ -9,4 +9,4 @@ if [[ "${?}" != "0" ]]; then
echo "Couldn't get revision of the git repository. Setting to 'unknown' instead" echo "Couldn't get revision of the git repository. Setting to 'unknown' instead"
DOCKER_COMPOSE_GITSHA="unknown" DOCKER_COMPOSE_GITSHA="unknown"
fi fi
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA echo "${DOCKER_COMPOSE_GITSHA}"


@ -192,6 +192,8 @@ be handled manually by the operator:
- Bump the version in `compose/__init__.py` to the *next* minor version - Bump the version in `compose/__init__.py` to the *next* minor version
number with `dev` appended. For example, if you just released `1.4.0`, number with `dev` appended. For example, if you just released `1.4.0`,
update it to `1.5.0dev` update it to `1.5.0dev`
- Update compose_version in [github.com/docker/docker.github.io/blob/master/_config.yml](https://github.com/docker/docker.github.io/blob/master/_config.yml) and [github.com/docker/docker.github.io/blob/master/_config_authoring.yml](https://github.com/docker/docker.github.io/blob/master/_config_authoring.yml)
- Update the release note in [github.com/docker/docker.github.io](https://github.com/docker/docker.github.io/blob/master/release-notes/docker-compose.md)
## Advanced options ## Advanced options


@ -15,6 +15,7 @@ from release.const import NAME
from release.const import REPO_ROOT from release.const import REPO_ROOT
from release.downloader import BinaryDownloader from release.downloader import BinaryDownloader
from release.images import ImageManager from release.images import ImageManager
from release.images import is_tag_latest
from release.pypi import check_pypirc from release.pypi import check_pypirc
from release.pypi import pypi_upload from release.pypi import pypi_upload
from release.repository import delete_assets from release.repository import delete_assets
@ -204,7 +205,7 @@ def resume(args):
delete_assets(gh_release) delete_assets(gh_release)
upload_assets(gh_release, files) upload_assets(gh_release, files)
img_manager = ImageManager(args.release) img_manager = ImageManager(args.release)
img_manager.build_images(repository, files) img_manager.build_images(repository)
except ScriptError as e: except ScriptError as e:
print(e) print(e)
return 1 return 1
@ -244,7 +245,7 @@ def start(args):
gh_release = create_release_draft(repository, args.release, pr_data, files) gh_release = create_release_draft(repository, args.release, pr_data, files)
upload_assets(gh_release, files) upload_assets(gh_release, files)
img_manager = ImageManager(args.release) img_manager = ImageManager(args.release)
img_manager.build_images(repository, files) img_manager.build_images(repository)
except ScriptError as e: except ScriptError as e:
print(e) print(e)
return 1 return 1
@ -258,7 +259,8 @@ def finalize(args):
try: try:
check_pypirc() check_pypirc()
repository = Repository(REPO_ROOT, args.repo) repository = Repository(REPO_ROOT, args.repo)
img_manager = ImageManager(args.release) tag_as_latest = is_tag_latest(args.release)
img_manager = ImageManager(args.release, tag_as_latest)
pr_data = repository.find_release_pr(args.release) pr_data = repository.find_release_pr(args.release)
if not pr_data: if not pr_data:
raise ScriptError('No PR found for {}'.format(args.release)) raise ScriptError('No PR found for {}'.format(args.release))


@ -6,4 +6,5 @@ import os
REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', '..') REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', '..')
NAME = 'docker/compose' NAME = 'docker/compose'
COMPOSE_TESTS_IMAGE_BASE_NAME = NAME + '-tests'
BINTRAY_ORG = 'docker-compose' BINTRAY_ORG = 'docker-compose'


@ -5,18 +5,36 @@ from __future__ import unicode_literals
import base64 import base64
import json import json
import os import os
import shutil
import docker import docker
from enum import Enum
from .const import NAME
from .const import REPO_ROOT from .const import REPO_ROOT
from .utils import ScriptError from .utils import ScriptError
from .utils import yesno
from script.release.release.const import COMPOSE_TESTS_IMAGE_BASE_NAME
class Platform(Enum):
ALPINE = 'alpine'
DEBIAN = 'debian'
def __str__(self):
return self.value
# Checks whether this version follows the GA version format ('x.y.z'), i.e. is not an RC
def is_tag_latest(version):
ga_version = all(n.isdigit() for n in version.split('.')) and version.count('.') == 2
return ga_version and yesno('Should this release be tagged as \"latest\"? [Y/n]: ', default=True)
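Only plain `x.y.z` versions ever reach the yes/no prompt, so a release candidate such as `1.25.0-rc1` can never be tagged `latest`. The version predicate in isolation:

    def looks_ga(version):
        # Plain 'x.y.z' only, e.g. '1.25.0'
        return version.count('.') == 2 and all(n.isdigit() for n in version.split('.'))

    assert looks_ga('1.25.0')
    assert not looks_ga('1.25.0-rc1')  # '0-rc1' is not all digits
    assert not looks_ga('1.25')        # only one dot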
class ImageManager(object): class ImageManager(object):
def __init__(self, version): def __init__(self, version, latest=False):
self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env()) self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
self.version = version self.version = version
self.latest = latest
if 'HUB_CREDENTIALS' in os.environ: if 'HUB_CREDENTIALS' in os.environ:
print('HUB_CREDENTIALS found in environment, issuing login') print('HUB_CREDENTIALS found in environment, issuing login')
credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS'])) credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
@ -24,16 +42,36 @@ class ImageManager(object):
username=credentials['Username'], password=credentials['Password'] username=credentials['Username'], password=credentials['Password']
) )
def build_images(self, repository, files): def _tag(self, image, existing_tag, new_tag):
print("Building release images...") existing_repo_tag = '{image}:{tag}'.format(image=image, tag=existing_tag)
repository.write_git_sha() new_repo_tag = '{image}:{tag}'.format(image=image, tag=new_tag)
distdir = os.path.join(REPO_ROOT, 'dist') self.docker_client.tag(existing_repo_tag, new_repo_tag)
os.makedirs(distdir, exist_ok=True)
shutil.copy(files['docker-compose-Linux-x86_64'][0], distdir) def get_full_version(self, platform=None):
os.chmod(os.path.join(distdir, 'docker-compose-Linux-x86_64'), 0o755) return self.version + '-' + platform.__str__() if platform else self.version
print('Building docker/compose image')
def get_runtime_image_tag(self, tag):
return '{image_base_image}:{tag}'.format(
image_base_image=NAME,
tag=self.get_full_version(tag)
)
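Together with `get_full_version`, this produces the per-platform image tags listed in the changelog. Illustration for a hypothetical `1.25.0` release:

    # NAME == 'docker/compose'; the version below is illustrative
    version = '1.25.0'
    for platform in ('alpine', 'debian'):
        print('docker/compose:{}-{}'.format(version, platform))
    # docker/compose:1.25.0-alpine
    # docker/compose:1.25.0-debian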
def build_runtime_image(self, repository, platform):
git_sha = repository.write_git_sha()
compose_image_base_name = NAME
print('Building {image} image ({platform} based)'.format(
image=compose_image_base_name,
platform=platform
))
full_version = self.get_full_version(platform)
build_tag = self.get_runtime_image_tag(platform)
logstream = self.docker_client.build( logstream = self.docker_client.build(
REPO_ROOT, tag='docker/compose:{}'.format(self.version), dockerfile='Dockerfile.run', REPO_ROOT,
tag=build_tag,
buildargs={
'BUILD_PLATFORM': platform.value,
'GIT_COMMIT': git_sha,
},
decode=True decode=True
) )
for chunk in logstream: for chunk in logstream:
@ -42,9 +80,33 @@ class ImageManager(object):
if 'stream' in chunk: if 'stream' in chunk:
print(chunk['stream'], end='') print(chunk['stream'], end='')
print('Building test image (for UCP e2e)') if platform == Platform.ALPINE:
self._tag(compose_image_base_name, full_version, self.version)
if self.latest:
self._tag(compose_image_base_name, full_version, platform)
if platform == Platform.ALPINE:
self._tag(compose_image_base_name, full_version, 'latest')
def get_ucp_test_image_tag(self, tag=None):
return '{image}:{tag}'.format(
image=COMPOSE_TESTS_IMAGE_BASE_NAME,
tag=tag or self.version
)
# Used for producing a test image for UCP
def build_ucp_test_image(self, repository):
print('Building test image (debian based for UCP e2e)')
git_sha = repository.write_git_sha()
ucp_test_image_tag = self.get_ucp_test_image_tag()
logstream = self.docker_client.build( logstream = self.docker_client.build(
REPO_ROOT, tag='docker-compose-tests:tmp', decode=True REPO_ROOT,
tag=ucp_test_image_tag,
target='build',
buildargs={
'BUILD_PLATFORM': Platform.DEBIAN.value,
'GIT_COMMIT': git_sha,
},
decode=True
) )
for chunk in logstream: for chunk in logstream:
if 'error' in chunk: if 'error' in chunk:
@ -52,26 +114,15 @@ class ImageManager(object):
if 'stream' in chunk: if 'stream' in chunk:
print(chunk['stream'], end='') print(chunk['stream'], end='')
container = self.docker_client.create_container( self._tag(COMPOSE_TESTS_IMAGE_BASE_NAME, self.version, 'latest')
'docker-compose-tests:tmp', entrypoint='tox'
)
self.docker_client.commit(container, 'docker/compose-tests', 'latest')
self.docker_client.tag(
'docker/compose-tests:latest', 'docker/compose-tests:{}'.format(self.version)
)
self.docker_client.remove_container(container, force=True)
self.docker_client.remove_image('docker-compose-tests:tmp', force=True)
@property def build_images(self, repository):
def image_names(self): self.build_runtime_image(repository, Platform.ALPINE)
return [ self.build_runtime_image(repository, Platform.DEBIAN)
'docker/compose-tests:latest', self.build_ucp_test_image(repository)
'docker/compose-tests:{}'.format(self.version),
'docker/compose:{}'.format(self.version)
]
def check_images(self): def check_images(self):
for name in self.image_names: for name in self.get_images_to_push():
try: try:
self.docker_client.inspect_image(name) self.docker_client.inspect_image(name)
except docker.errors.ImageNotFound: except docker.errors.ImageNotFound:
@ -79,8 +130,22 @@ class ImageManager(object):
return False return False
return True return True
def get_images_to_push(self):
tags_to_push = {
"{}:{}".format(NAME, self.version),
self.get_runtime_image_tag(Platform.ALPINE),
self.get_runtime_image_tag(Platform.DEBIAN),
self.get_ucp_test_image_tag(),
self.get_ucp_test_image_tag('latest'),
}
if is_tag_latest(self.version):
tags_to_push.add("{}:latest".format(NAME))
return tags_to_push
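For a hypothetical GA release `1.25.0`, the push set would therefore look like the sketch below; `docker/compose:latest` is only included after the interactive confirmation in `is_tag_latest`:

    # Sketch of get_images_to_push() for version '1.25.0':
    {
        'docker/compose:1.25.0',
        'docker/compose:1.25.0-alpine',
        'docker/compose:1.25.0-debian',
        'docker/compose-tests:1.25.0',
        'docker/compose-tests:latest',
        'docker/compose:latest',  # only if confirmed as "latest"
    }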
def push_images(self): def push_images(self):
for name in self.image_names: tags_to_push = self.get_images_to_push()
print('Build tags to push {}'.format(tags_to_push))
for name in tags_to_push:
print('Pushing {} to Docker Hub'.format(name)) print('Pushing {} to Docker Hub'.format(name))
logstream = self.docker_client.push(name, stream=True, decode=True) logstream = self.docker_client.push(name, stream=True, decode=True)
for chunk in logstream: for chunk in logstream:


@ -175,6 +175,7 @@ class Repository(object):
def write_git_sha(self): def write_git_sha(self):
with open(os.path.join(REPO_ROOT, 'compose', 'GITSHA'), 'w') as f: with open(os.path.join(REPO_ROOT, 'compose', 'GITSHA'), 'w') as f:
f.write(self.git_repo.head.commit.hexsha[:7]) f.write(self.git_repo.head.commit.hexsha[:7])
return self.git_repo.head.commit.hexsha[:7]
def cherry_pick_prs(self, release_branch, ids): def cherry_pick_prs(self, release_branch, ids):
if not ids: if not ids:
@ -219,7 +220,7 @@ def get_contributors(pr_data):
commits = pr_data.get_commits() commits = pr_data.get_commits()
authors = {} authors = {}
for commit in commits: for commit in commits:
if not commit.author: if not commit or not commit.author or not commit.author.login:
continue continue
author = commit.author.login author = commit.author.login
authors[author] = authors.get(author, 0) + 1 authors[author] = authors.get(author, 0) + 1


@ -15,7 +15,7 @@
set -e set -e
VERSION="1.24.0" VERSION="1.25.0-rc1"
IMAGE="docker/compose:$VERSION" IMAGE="docker/compose:$VERSION"
@ -48,7 +48,7 @@ fi
# Only allocate tty if we detect one # Only allocate tty if we detect one
if [ -t 0 -a -t 1 ]; then if [ -t 0 -a -t 1 ]; then
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t" DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
fi fi
# Always set -i to support piped and terminal input in run/exec # Always set -i to support piped and terminal input in run/exec


@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62 SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
fi fi
OPENSSL_VERSION=1.1.0j OPENSSL_VERSION=1.1.1a
OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
OPENSSL_SHA1=dcad1efbacd9a4ed67d4514470af12bbe2a1d60a OPENSSL_SHA1=8fae27b4f34445a5500c9dc50ae66b4d6472ce29
PYTHON_VERSION=3.6.8 PYTHON_VERSION=3.7.2
PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
PYTHON_SHA1=09fcc4edaef0915b4dedbfb462f1cd15f82d3a6f PYTHON_SHA1=0cd8e52d8ed1d0be12ac8e87a623a15df3a3b418
# #
# Install prerequisites. # Install prerequisites.
@ -36,7 +36,7 @@ if ! [ -x "$(command -v python3)" ]; then
brew install python3 brew install python3
fi fi
if ! [ -x "$(command -v virtualenv)" ]; then if ! [ -x "$(command -v virtualenv)" ]; then
pip install virtualenv pip install virtualenv==16.2.0
fi fi
# #
@ -50,7 +50,7 @@ mkdir -p ${TOOLCHAIN_PATH}
# #
# Set macOS SDK. # Set macOS SDK.
# #
if [ ${SDK_FETCH} ]; then if [[ ${SDK_FETCH} && ! -f ${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk/SDKSettings.plist ]]; then
SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk
fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1} fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1}
else else
@ -61,7 +61,7 @@ fi
# Build OpenSSL. # Build OpenSSL.
# #
OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION} OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION}
if ! [ -f ${TOOLCHAIN_PATH}/bin/openssl ]; then if ! [[ $(${TOOLCHAIN_PATH}/bin/openssl version) == *"${OPENSSL_VERSION}"* ]]; then
rm -rf ${OPENSSL_SRC_PATH} rm -rf ${OPENSSL_SRC_PATH}
fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1} fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1}
( (
@ -77,7 +77,7 @@ fi
# Build Python. # Build Python.
# #
PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION} PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION}
if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then if ! [[ $(${TOOLCHAIN_PATH}/bin/python3 --version) == *"${PYTHON_VERSION}"* ]]; then
rm -rf ${PYTHON_SRC_PATH} rm -rf ${PYTHON_SRC_PATH}
fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1} fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}
( (
@ -87,9 +87,10 @@ if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
--datarootdir=${TOOLCHAIN_PATH}/share \ --datarootdir=${TOOLCHAIN_PATH}/share \
--datadir=${TOOLCHAIN_PATH}/share \ --datadir=${TOOLCHAIN_PATH}/share \
--enable-framework=${TOOLCHAIN_PATH}/Frameworks \ --enable-framework=${TOOLCHAIN_PATH}/Frameworks \
--with-openssl=${TOOLCHAIN_PATH} \
MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \ MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \
CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \ CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \
CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}include" \ CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}/include" \
LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib" LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib"
make -j 4 make -j 4
make install PYTHONAPPSDIR=${TOOLCHAIN_PATH} make install PYTHONAPPSDIR=${TOOLCHAIN_PATH}
@ -97,6 +98,11 @@ if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
) )
fi fi
#
# Smoke test built Python.
#
openssl_version ${TOOLCHAIN_PATH}
echo "" echo ""
echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}" echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}"
echo "*** Using SDK ${SDK_PATH}" echo "*** Using SDK ${SDK_PATH}"


@ -8,8 +8,7 @@ set -e
docker run --rm \ docker run --rm \
--tty \ --tty \
${GIT_VOLUME} \ ${GIT_VOLUME} \
--entrypoint="tox" \ "$TAG" tox -e pre-commit
"$TAG" -e pre-commit
get_versions="docker run --rm get_versions="docker run --rm
--entrypoint=/code/.tox/py27/bin/python --entrypoint=/code/.tox/py27/bin/python
@ -24,7 +23,7 @@ fi
BUILD_NUMBER=${BUILD_NUMBER-$USER} BUILD_NUMBER=${BUILD_NUMBER-$USER}
PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py36} PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py37}
for version in $DOCKER_VERSIONS; do for version in $DOCKER_VERSIONS; do
>&2 echo "Running tests against Docker $version" >&2 echo "Running tests against Docker $version"


@ -20,6 +20,3 @@ export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER"
GIT_VOLUME="--volumes-from=$(hostname)" GIT_VOLUME="--volumes-from=$(hostname)"
. script/test/all . script/test/all
>&2 echo "Building Linux binary"
. script/build/linux-entrypoint


@ -3,17 +3,18 @@
set -ex set -ex
TAG="docker-compose:$(git rev-parse --short HEAD)" TAG="docker-compose:alpine-$(git rev-parse --short HEAD)"
# By default use the Dockerfile, but can be overridden to use an alternative file # By default use the Dockerfile, but can be overridden to use an alternative file
# e.g DOCKERFILE=Dockerfile.armhf script/test/default # e.g DOCKERFILE=Dockerfile.s390x script/test/default
DOCKERFILE="${DOCKERFILE:-Dockerfile}" DOCKERFILE="${DOCKERFILE:-Dockerfile}"
DOCKER_BUILD_TARGET="${DOCKER_BUILD_TARGET:-build}"
rm -rf coverage-html rm -rf coverage-html
# Create the host directory so it's owned by $USER # Create the host directory so it's owned by $USER
mkdir -p coverage-html mkdir -p coverage-html
docker build -f ${DOCKERFILE} -t "$TAG" . docker build -f "${DOCKERFILE}" -t "${TAG}" --target "${DOCKER_BUILD_TARGET}" .
GIT_VOLUME="--volume=$(pwd)/.git:/code/.git" GIT_VOLUME="--volume=$(pwd)/.git:/code/.git"
. script/test/all . script/test/all


@ -33,10 +33,10 @@ install_requires = [
'cached-property >= 1.2.0, < 2', 'cached-property >= 1.2.0, < 2',
'docopt >= 0.6.1, < 0.7', 'docopt >= 0.6.1, < 0.7',
'PyYAML >= 3.10, < 4.3', 'PyYAML >= 3.10, < 4.3',
'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.21', 'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.23',
'texttable >= 0.9.0, < 0.10', 'texttable >= 0.9.0, < 0.10',
'websocket-client >= 0.32.0, < 1.0', 'websocket-client >= 0.32.0, < 1.0',
'docker[ssh] >= 3.7.0, < 4.0', 'docker[ssh] >= 3.7.0, < 4.0.2',
'dockerpty >= 0.4.1, < 0.5', 'dockerpty >= 0.4.1, < 0.5',
'six >= 1.3.0, < 2', 'six >= 1.3.0, < 2',
'jsonschema >= 2.5.1, < 3', 'jsonschema >= 2.5.1, < 3',


@ -11,6 +11,7 @@ import subprocess
import time import time
from collections import Counter from collections import Counter
from collections import namedtuple from collections import namedtuple
from functools import reduce
from operator import attrgetter from operator import attrgetter
import pytest import pytest
@ -170,6 +171,13 @@ class CLITestCase(DockerClientTestCase):
# Prevent tearDown from trying to create a project # Prevent tearDown from trying to create a project
self.base_dir = None self.base_dir = None
def test_quiet_build(self):
self.base_dir = 'tests/fixtures/build-args'
result = self.dispatch(['build'], None)
quiet_result = self.dispatch(['build', '-q'], None)
assert result.stdout != ""
assert quiet_result.stdout == ""
def test_help_nonexistent(self): def test_help_nonexistent(self):
self.base_dir = 'tests/fixtures/no-composefile' self.base_dir = 'tests/fixtures/no-composefile'
result = self.dispatch(['help', 'foobar'], returncode=1) result = self.dispatch(['help', 'foobar'], returncode=1)
@ -324,6 +332,21 @@ class CLITestCase(DockerClientTestCase):
'version': '2.4' 'version': '2.4'
} }
def test_config_with_env_file(self):
self.base_dir = 'tests/fixtures/default-env-file'
result = self.dispatch(['--env-file', '.env2', 'config'])
json_result = yaml.load(result.stdout)
assert json_result == {
'services': {
'web': {
'command': 'false',
'image': 'alpine:latest',
'ports': ['5644/tcp', '9998/tcp']
}
},
'version': '2.4'
}
def test_config_with_dot_env_and_override_dir(self): def test_config_with_dot_env_and_override_dir(self):
self.base_dir = 'tests/fixtures/default-env-file' self.base_dir = 'tests/fixtures/default-env-file'
result = self.dispatch(['--project-directory', 'alt/', 'config']) result = self.dispatch(['--project-directory', 'alt/', 'config'])
@ -633,6 +656,13 @@ class CLITestCase(DockerClientTestCase):
'image library/nonexisting-image:latest not found' in result.stderr or 'image library/nonexisting-image:latest not found' in result.stderr or
'pull access denied for nonexisting-image' in result.stderr) 'pull access denied for nonexisting-image' in result.stderr)
def test_pull_with_build(self):
result = self.dispatch(['-f', 'pull-with-build.yml', 'pull'])
assert 'Pulling simple' not in result.stderr
assert 'Pulling from_simple' not in result.stderr
assert 'Pulling another ...' in result.stderr
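This test exercises the changelog's "Only pull images that can't be built": services with a `build` section are skipped by `pull`. Conceptually, a hedged sketch using the `can_be_built` predicate shown earlier (not the exact Compose code path):

    # Pull only services that cannot be built locally.
    def services_to_pull(services):
        return [s for s in services if not s.can_be_built()]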
def test_pull_with_quiet(self): def test_pull_with_quiet(self):
assert self.dispatch(['pull', '--quiet']).stderr == '' assert self.dispatch(['pull', '--quiet']).stderr == ''
assert self.dispatch(['pull', '--quiet']).stdout == '' assert self.dispatch(['pull', '--quiet']).stdout == ''
@ -747,6 +777,26 @@ class CLITestCase(DockerClientTestCase):
] ]
assert not containers assert not containers
def test_build_rm(self):
containers = [
Container.from_ps(self.project.client, c)
for c in self.project.client.containers(all=True)
]
assert not containers
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', '--no-rm', 'simple'], returncode=0)
containers = [
Container.from_ps(self.project.client, c)
for c in self.project.client.containers(all=True)
]
assert containers
for c in self.project.client.containers(all=True):
self.addCleanup(self.project.client.remove_container, c, force=True)
def test_build_shm_size_build_option(self): def test_build_shm_size_build_option(self):
pull_busybox(self.client) pull_busybox(self.client)
self.base_dir = 'tests/fixtures/build-shm-size' self.base_dir = 'tests/fixtures/build-shm-size'
@ -1108,6 +1158,22 @@ class CLITestCase(DockerClientTestCase):
] ]
assert len(remote_volumes) > 0 assert len(remote_volumes) > 0
@v2_only()
def test_up_no_start_remove_orphans(self):
self.base_dir = 'tests/fixtures/v2-simple'
self.dispatch(['up', '--no-start'], None)
services = self.project.get_services()
stopped = reduce((lambda prev, next: prev.containers(
stopped=True) + next.containers(stopped=True)), services)
assert len(stopped) == 2
self.dispatch(['-f', 'one-container.yml', 'up', '--no-start', '--remove-orphans'], None)
stopped2 = reduce((lambda prev, next: prev.containers(
stopped=True) + next.containers(stopped=True)), services)
assert len(stopped2) == 1
@v2_only() @v2_only()
def test_up_no_ansi(self): def test_up_no_ansi(self):
self.base_dir = 'tests/fixtures/v2-simple' self.base_dir = 'tests/fixtures/v2-simple'
@ -2301,6 +2367,7 @@ class CLITestCase(DockerClientTestCase):
assert 'another' in result.stdout assert 'another' in result.stdout
assert 'exited with code 0' in result.stdout assert 'exited with code 0' in result.stdout
@pytest.mark.skip(reason="race condition between up and logs")
def test_logs_follow_logs_from_new_containers(self): def test_logs_follow_logs_from_new_containers(self):
self.base_dir = 'tests/fixtures/logs-composefile' self.base_dir = 'tests/fixtures/logs-composefile'
self.dispatch(['up', '-d', 'simple']) self.dispatch(['up', '-d', 'simple'])
@ -2327,6 +2394,7 @@ class CLITestCase(DockerClientTestCase):
assert '{} exited with code 0'.format(another_name) in result.stdout assert '{} exited with code 0'.format(another_name) in result.stdout
assert '{} exited with code 137'.format(simple_name) in result.stdout assert '{} exited with code 137'.format(simple_name) in result.stdout
@pytest.mark.skip(reason="race condition between up and logs")
def test_logs_follow_logs_from_restarted_containers(self): def test_logs_follow_logs_from_restarted_containers(self):
self.base_dir = 'tests/fixtures/logs-restart-composefile' self.base_dir = 'tests/fixtures/logs-restart-composefile'
proc = start_process(self.base_dir, ['up']) proc = start_process(self.base_dir, ['up'])
@ -2347,6 +2415,7 @@ class CLITestCase(DockerClientTestCase):
) == 3 ) == 3
assert result.stdout.count('world') == 3 assert result.stdout.count('world') == 3
@pytest.mark.skip(reason="race condition between up and logs")
def test_logs_default(self): def test_logs_default(self):
self.base_dir = 'tests/fixtures/logs-composefile' self.base_dir = 'tests/fixtures/logs-composefile'
self.dispatch(['up', '-d']) self.dispatch(['up', '-d'])
@ -2473,10 +2542,12 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d']) self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2 assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('db').containers()) == 1
assert len(project.get_service('worker').containers()) == 0
self.dispatch(['up', '-d', '--scale', 'web=3']) self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'worker=1'])
assert len(project.get_service('web').containers()) == 3 assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('db').containers()) == 1
assert len(project.get_service('worker').containers()) == 1
def test_up_scale_scale_down(self): def test_up_scale_scale_down(self):
self.base_dir = 'tests/fixtures/scale' self.base_dir = 'tests/fixtures/scale'
@ -2485,22 +2556,26 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d']) self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2 assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('db').containers()) == 1
assert len(project.get_service('worker').containers()) == 0
self.dispatch(['up', '-d', '--scale', 'web=1']) self.dispatch(['up', '-d', '--scale', 'web=1'])
assert len(project.get_service('web').containers()) == 1 assert len(project.get_service('web').containers()) == 1
assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('db').containers()) == 1
assert len(project.get_service('worker').containers()) == 0
def test_up_scale_reset(self): def test_up_scale_reset(self):
self.base_dir = 'tests/fixtures/scale' self.base_dir = 'tests/fixtures/scale'
project = self.project project = self.project
self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3']) self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3', '--scale', 'worker=3'])
assert len(project.get_service('web').containers()) == 3 assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 3 assert len(project.get_service('db').containers()) == 3
assert len(project.get_service('worker').containers()) == 3
self.dispatch(['up', '-d']) self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2 assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('db').containers()) == 1
assert len(project.get_service('worker').containers()) == 0
def test_up_scale_to_zero(self): def test_up_scale_to_zero(self):
self.base_dir = 'tests/fixtures/scale' self.base_dir = 'tests/fixtures/scale'
@ -2509,10 +2584,12 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d']) self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2 assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('db').containers()) == 1
assert len(project.get_service('worker').containers()) == 0
self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0']) self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0', '--scale', 'worker=0'])
assert len(project.get_service('web').containers()) == 0 assert len(project.get_service('web').containers()) == 0
assert len(project.get_service('db').containers()) == 0 assert len(project.get_service('db').containers()) == 0
assert len(project.get_service('worker').containers()) == 0
def test_port(self): def test_port(self):
self.base_dir = 'tests/fixtures/ports-composefile' self.base_dir = 'tests/fixtures/ports-composefile'

tests/fixtures/default-env-file/.env2 vendored Normal file

@ -0,0 +1,4 @@
IMAGE=alpine:latest
COMMAND=false
PORT1=5644
PORT2=9998


@ -1,6 +1,6 @@
simple: simple:
image: busybox:latest image: busybox:latest
command: sh -c "echo hello && tail -f /dev/null" command: sh -c "sleep 1 && echo hello && tail -f /dev/null"
another: another:
image: busybox:latest image: busybox:latest
command: sh -c "echo test" command: sh -c "sleep 1 && echo test"


@ -3,5 +3,5 @@ simple:
command: sh -c "echo hello && tail -f /dev/null" command: sh -c "echo hello && tail -f /dev/null"
another: another:
image: busybox:latest image: busybox:latest
command: sh -c "sleep 0.5 && echo world && /bin/false" command: sh -c "sleep 2 && echo world && /bin/false"
restart: "on-failure:2" restart: "on-failure:2"


@ -5,5 +5,9 @@ services:
command: top command: top
scale: 2 scale: 2
db: db:
image: busybox image: busybox
command: top command: top
worker:
image: busybox
command: top
scale: 0


@ -0,0 +1,11 @@
version: "3"
services:
build_simple:
image: simple
build: .
command: top
from_simple:
image: simple
another:
image: busybox:latest
command: top


@ -0,0 +1,5 @@
version: "2"
services:
simple:
image: busybox:latest
command: top


@ -0,0 +1,52 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import tempfile
from ddt import data
from ddt import ddt
from .. import mock
from compose.cli.command import project_from_options
from tests.integration.testcases import DockerClientTestCase
@ddt
class EnvironmentTest(DockerClientTestCase):
@classmethod
def setUpClass(cls):
super(EnvironmentTest, cls).setUpClass()
cls.compose_file = tempfile.NamedTemporaryFile(mode='w+b')
cls.compose_file.write(bytes("""version: '3.2'
services:
svc:
image: busybox:latest
environment:
TEST_VARIABLE: ${TEST_VARIABLE}""", encoding='utf-8'))
cls.compose_file.flush()
@classmethod
def tearDownClass(cls):
super(EnvironmentTest, cls).tearDownClass()
cls.compose_file.close()
@data('events',
'exec',
'kill',
'logs',
'pause',
'ps',
'restart',
'rm',
'start',
'stop',
'top',
'unpause')
def test_no_warning_on_missing_host_environment_var_on_silent_commands(self, cmd):
options = {'COMMAND': cmd, '--file': [EnvironmentTest.compose_file.name]}
with mock.patch('compose.config.environment.log') as fake_log:
# Note that the warning silencing and the env variables check is
# done in `project_from_options`
# So no need to have a proper options map, the `COMMAND` key is enough
project_from_options('.', options)
assert fake_log.warning.call_count == 0


@ -1,6 +1,7 @@
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
import copy
import json import json
import os import os
import random import random
@ -1496,6 +1497,60 @@ class ProjectTest(DockerClientTestCase):
output = container.logs() output = container.logs()
assert output == b"This is the secret\n" assert output == b"This is the secret\n"
@v3_only()
def test_project_up_with_added_secrets(self):
node = create_host_file(self.client, os.path.abspath('tests/fixtures/secrets/default'))
config_input1 = {
'version': V3_1,
'services': [
{
'name': 'web',
'image': 'busybox:latest',
'command': 'cat /run/secrets/special',
'environment': ['constraint:node=={}'.format(node if node is not None else '')]
}
],
'secrets': {
'super': {
'file': os.path.abspath('tests/fixtures/secrets/default')
}
}
}
config_input2 = copy.deepcopy(config_input1)
# Add the secret
config_input2['services'][0]['secrets'] = [
types.ServiceSecret.parse({'source': 'super', 'target': 'special'})
]
config_data1 = build_config(**config_input1)
config_data2 = build_config(**config_input2)
# First up with non-secret
project = Project.from_config(
client=self.client,
name='composetest',
config_data=config_data1,
)
project.up()
# Then up with secret
project = Project.from_config(
client=self.client,
name='composetest',
config_data=config_data2,
)
project.up()
project.stop()
containers = project.containers(stopped=True)
assert len(containers) == 1
container, = containers
output = container.logs()
assert output == b"This is the secret\n"
@v2_only() @v2_only()
def test_initialize_volumes_invalid_volume_driver(self): def test_initialize_volumes_invalid_volume_driver(self):
vol_name = '{0:x}'.format(random.getrandbits(32)) vol_name = '{0:x}'.format(random.getrandbits(32))


@ -695,8 +695,8 @@ class ServiceTest(DockerClientTestCase):
new_container, = service.execute_convergence_plan( new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container])) ConvergencePlan('recreate', [old_container]))
mock_log.warn.assert_called_once_with(mock.ANY) mock_log.warning.assert_called_once_with(mock.ANY)
_, args, kwargs = mock_log.warn.mock_calls[0] _, args, kwargs = mock_log.warning.mock_calls[0]
assert "Service \"db\" is using volume \"/data\" from the previous container" in args[0] assert "Service \"db\" is using volume \"/data\" from the previous container" in args[0]
assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
@ -1382,7 +1382,7 @@ class ServiceTest(DockerClientTestCase):
with pytest.raises(OperationFailedError): with pytest.raises(OperationFailedError):
service.scale(3) service.scale(3)
captured_output = mock_log.warn.call_args[0][0] captured_output = mock_log.warning.call_args[0][0]
assert len(service.containers()) == 1 assert len(service.containers()) == 1
assert "Remove the custom name to scale the service." in captured_output assert "Remove the custom name to scale the service." in captured_output


@ -5,6 +5,8 @@ by `docker-compose up`.
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
import copy
import py import py
from docker.errors import ImageNotFound from docker.errors import ImageNotFound
@ -209,6 +211,143 @@ class ProjectWithDependenciesTest(ProjectTestCase):
} }
class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
def setUp(self):
super(ProjectWithDependsOnDependenciesTest, self).setUp()
self.cfg = {
'version': '2',
'services': {
'db': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
},
'web': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
'depends_on': ['db'],
},
'nginx': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
'depends_on': ['web'],
},
}
}
def test_up(self):
local_cfg = copy.deepcopy(self.cfg)
containers = self.run_up(local_cfg)
assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
def test_change_leaf(self):
local_cfg = copy.deepcopy(self.cfg)
old_containers = self.run_up(local_cfg)
local_cfg['services']['nginx']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(local_cfg)
assert set(c.service for c in new_containers - old_containers) == set(['nginx'])
def test_change_middle(self):
local_cfg = copy.deepcopy(self.cfg)
old_containers = self.run_up(local_cfg)
local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(local_cfg)
assert set(c.service for c in new_containers - old_containers) == set(['web'])
def test_change_middle_always_recreate_deps(self):
local_cfg = copy.deepcopy(self.cfg)
old_containers = self.run_up(local_cfg, always_recreate_deps=True)
local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(local_cfg, always_recreate_deps=True)
assert set(c.service for c in new_containers - old_containers) == set(['web', 'nginx'])
def test_change_root(self):
local_cfg = copy.deepcopy(self.cfg)
old_containers = self.run_up(local_cfg)
local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(local_cfg)
assert set(c.service for c in new_containers - old_containers) == set(['db'])
def test_change_root_always_recreate_deps(self):
local_cfg = copy.deepcopy(self.cfg)
old_containers = self.run_up(local_cfg, always_recreate_deps=True)
local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(local_cfg, always_recreate_deps=True)
assert set(c.service for c in new_containers - old_containers) == set(['db', 'web', 'nginx'])
def test_change_root_no_recreate(self):
local_cfg = copy.deepcopy(self.cfg)
old_containers = self.run_up(local_cfg)
local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(
local_cfg,
strategy=ConvergenceStrategy.never)
assert new_containers - old_containers == set()
def test_service_removed_while_down(self):
local_cfg = copy.deepcopy(self.cfg)
next_cfg = copy.deepcopy(self.cfg)
del next_cfg['services']['db']
del next_cfg['services']['web']['depends_on']
containers = self.run_up(local_cfg)
assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
project = self.make_project(local_cfg)
project.stop(timeout=1)
next_containers = self.run_up(next_cfg)
assert set(c.service for c in next_containers) == set(['web', 'nginx'])
def test_service_removed_while_up(self):
local_cfg = copy.deepcopy(self.cfg)
containers = self.run_up(local_cfg)
assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
del local_cfg['services']['db']
del local_cfg['services']['web']['depends_on']
containers = self.run_up(local_cfg)
assert set(c.service for c in containers) == set(['web', 'nginx'])
def test_dependency_removed(self):
local_cfg = copy.deepcopy(self.cfg)
next_cfg = copy.deepcopy(self.cfg)
del next_cfg['services']['nginx']['depends_on']
containers = self.run_up(local_cfg, service_names=['nginx'])
assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
project = self.make_project(local_cfg)
project.stop(timeout=1)
next_containers = self.run_up(next_cfg, service_names=['nginx'])
assert set(c.service for c in next_containers if c.is_running) == set(['nginx'])
def test_dependency_added(self):
local_cfg = copy.deepcopy(self.cfg)
del local_cfg['services']['nginx']['depends_on']
containers = self.run_up(local_cfg, service_names=['nginx'])
assert set(c.service for c in containers) == set(['nginx'])
local_cfg['services']['nginx']['depends_on'] = ['db']
containers = self.run_up(local_cfg, service_names=['nginx'])
assert set(c.service for c in containers) == set(['nginx', 'db'])
class ServiceStateTest(DockerClientTestCase): class ServiceStateTest(DockerClientTestCase):
"""Test cases for Service.convergence_plan.""" """Test cases for Service.convergence_plan."""


@ -10,6 +10,7 @@ from compose import service
from compose.cli.errors import UserError from compose.cli.errors import UserError
from compose.config.config import Config from compose.config.config import Config
from compose.const import COMPOSEFILE_V2_0 as V2_0 from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.service import NoSuchImageError
@pytest.fixture @pytest.fixture
@ -35,6 +36,16 @@ def test_get_image_digest_image_uses_digest(mock_service):
assert not mock_service.image.called assert not mock_service.image.called
def test_get_image_digest_from_repository(mock_service):
mock_service.options['image'] = 'abcd'
mock_service.image_name = 'abcd'
mock_service.image.side_effect = NoSuchImageError(None)
mock_service.get_image_registry_data.return_value = {'Descriptor': {'digest': 'digest'}}
digest = bundle.get_image_digest(mock_service)
assert digest == 'abcd@digest'
def test_get_image_digest_no_image(mock_service): def test_get_image_digest_no_image(mock_service):
with pytest.raises(UserError) as exc: with pytest.raises(UserError) as exc:
bundle.get_image_digest(service.Service(name='theservice')) bundle.get_image_digest(service.Service(name='theservice'))
@ -83,7 +94,7 @@ def test_to_bundle():
configs={} configs={}
) )
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log: with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
output = bundle.to_bundle(config, image_digests) output = bundle.to_bundle(config, image_digests)
assert mock_log.mock_calls == [ assert mock_log.mock_calls == [
@ -117,7 +128,7 @@ def test_convert_service_to_bundle():
'privileged': True, 'privileged': True,
} }
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log: with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
config = bundle.convert_service_to_bundle(name, service_dict, image_digest) config = bundle.convert_service_to_bundle(name, service_dict, image_digest)
mock_log.assert_called_once_with( mock_log.assert_called_once_with(
@ -166,7 +177,7 @@ def test_make_service_networks_default():
name = 'theservice' name = 'theservice'
service_dict = {} service_dict = {}
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log: with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
networks = bundle.make_service_networks(name, service_dict) networks = bundle.make_service_networks(name, service_dict)
assert not mock_log.called assert not mock_log.called
@ -184,7 +195,7 @@ def test_make_service_networks():
}, },
} }
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log: with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
networks = bundle.make_service_networks(name, service_dict) networks = bundle.make_service_networks(name, service_dict)
mock_log.assert_called_once_with( mock_log.assert_called_once_with(


@@ -247,5 +247,5 @@ class TestGetTlsVersion(object):
         environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
         with mock.patch('compose.cli.docker_client.log') as mock_log:
             tls_version = get_tls_version(environment)
-        mock_log.warn.assert_called_once_with(mock.ANY)
+        mock_log.warning.assert_called_once_with(mock.ANY)
         assert tls_version is None
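The log.warn -> log.warning renames that recur throughout this diff track the standard library: Logger.warn is a deprecated alias for Logger.warning and has emitted a DeprecationWarning since Python 3.3. A short illustration:

    import logging

    log = logging.getLogger(__name__)
    log.warning('preferred spelling')  # supported API
    log.warn('deprecated alias')       # still works, but emits DeprecationWarning on Python 3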


@@ -63,7 +63,7 @@ class TestCLIMainTestCase(object):
         with mock.patch('compose.cli.main.log') as fake_log:
             warn_for_swarm_mode(mock_client)
-            assert fake_log.warn.call_count == 1
+            assert fake_log.warning.call_count == 1


 class TestSetupConsoleHandlerTestCase(object):


@@ -329,7 +329,7 @@ class ConfigTest(unittest.TestCase):
         )

         assert 'Unexpected type for "version" key in "filename.yml"' \
-            in mock_logging.warn.call_args[0][0]
+            in mock_logging.warning.call_args[0][0]

         service_dicts = config_data.services
         assert service_sort(service_dicts) == service_sort([
@@ -613,6 +613,25 @@ class ConfigTest(unittest.TestCase):
             excinfo.exconly()
         )

+    def test_config_integer_service_name_raise_validation_error_v2_when_no_interpolate(self):
+        with pytest.raises(ConfigurationError) as excinfo:
+            config.load(
+                build_config_details(
+                    {
+                        'version': '2',
+                        'services': {1: {'image': 'busybox'}}
+                    },
+                    'working_dir',
+                    'filename.yml'
+                ),
+                interpolate=False
+            )
+
+        assert (
+            "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'." in
+            excinfo.exconly()
+        )
+
     def test_config_integer_service_property_raise_validation_error(self):
         with pytest.raises(ConfigurationError) as excinfo:
             config.load(
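The expected error is independent of interpolation because it comes from YAML typing: an unquoted 1 used as a mapping key parses as an integer, so the service name reaches schema validation as an int rather than a string. A quick PyYAML illustration:

    import yaml

    doc = yaml.safe_load('services:\n  1: {image: busybox}\n')
    key = list(doc['services'])[0]
    print(type(key))  # <class 'int'> -- hence "must be a quoted string, i.e. '1'"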
@@ -3465,6 +3484,25 @@ class InterpolationTest(unittest.TestCase):
             'command': 'true'
         }

+    @mock.patch.dict(os.environ)
+    def test_config_file_with_options_environment_file(self):
+        project_dir = 'tests/fixtures/default-env-file'
+        service_dicts = config.load(
+            config.find(
+                project_dir, None, Environment.from_env_file(project_dir, '.env2')
+            )
+        ).services
+
+        assert service_dicts[0] == {
+            'name': 'web',
+            'image': 'alpine:latest',
+            'ports': [
+                types.ServicePort.parse('5644')[0],
+                types.ServicePort.parse('9998')[0]
+            ],
+            'command': 'false'
+        }
+
     @mock.patch.dict(os.environ)
     def test_config_file_with_environment_variable(self):
         project_dir = 'tests/fixtures/environment-interpolation'
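test_config_file_with_options_environment_file loads variables from an alternate env file ('.env2' in the fixture) rather than the default .env. A simplified sketch of the mechanism, assuming plain KEY=VALUE parsing (real Compose handling of quoting and comments is more careful) and the documented precedence that the process environment overrides file values:

    import os

    def load_env_file(path):
        env = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#') and '=' in line:
                    key, _, value = line.partition('=')
                    env[key] = value
        return env

    resolved = load_env_file('.env2')
    resolved.update(os.environ)  # shell values win over env-file values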
@@ -3532,8 +3570,8 @@ class InterpolationTest(unittest.TestCase):
         with mock.patch('compose.config.environment.log') as log:
             config.load(config_details)

-        assert 2 == log.warn.call_count
-        warnings = sorted(args[0][0] for args in log.warn.call_args_list)
+        assert 2 == log.warning.call_count
+        warnings = sorted(args[0][0] for args in log.warning.call_args_list)
         assert 'BAR' in warnings[0]
         assert 'FOO' in warnings[1]
@@ -3563,8 +3601,8 @@ class InterpolationTest(unittest.TestCase):
         with mock.patch('compose.config.config.log') as log:
             config.load(config_details, compatibility=True)

-        assert log.warn.call_count == 1
-        warn_message = log.warn.call_args[0][0]
+        assert log.warning.call_count == 1
+        warn_message = log.warning.call_args[0][0]
         assert warn_message.startswith(
             'The following deploy sub-keys are not supported in compatibility mode'
         )
@@ -3603,7 +3641,7 @@ class InterpolationTest(unittest.TestCase):
         with mock.patch('compose.config.config.log') as log:
             cfg = config.load(config_details, compatibility=True)

-        assert log.warn.call_count == 0
+        assert log.warning.call_count == 0

         service_dict = cfg.services[0]
         assert service_dict == {
@@ -5327,6 +5365,28 @@ class SerializeTest(unittest.TestCase):
         assert serialized_service['command'] == 'echo $$FOO'
         assert serialized_service['entrypoint'][0] == '$$SHELL'

+    def test_serialize_escape_dont_interpolate(self):
+        cfg = {
+            'version': '2.2',
+            'services': {
+                'web': {
+                    'image': 'busybox',
+                    'command': 'echo $FOO',
+                    'environment': {
+                        'CURRENCY': '$'
+                    },
+                    'entrypoint': ['$SHELL', '-c'],
+                }
+            }
+        }
+        config_dict = config.load(build_config_details(cfg), interpolate=False)
+
+        serialized_config = yaml.load(serialize_config(config_dict, escape_dollar=False))
+        serialized_service = serialized_config['services']['web']
+        assert serialized_service['environment']['CURRENCY'] == '$'
+        assert serialized_service['command'] == 'echo $FOO'
+        assert serialized_service['entrypoint'][0] == '$SHELL'
+
     def test_serialize_unicode_values(self):
         cfg = {
             'version': '2.3',
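The pair of serialization tests differs only in dollar handling: after interpolation, literal dollars are escaped to $$ so that a later interpolation pass keeps them literal, whereas with --no-interpolate the values were never interpolated and are emitted verbatim. A toy version of the switch:

    def escape_dollars(value, escape_dollar=True):
        # doubling '$' protects it from a subsequent interpolation pass
        return value.replace('$', '$$') if escape_dollar else value

    assert escape_dollars('echo $FOO') == 'echo $$FOO'
    assert escape_dollars('echo $FOO', escape_dollar=False) == 'echo $FOO'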


@@ -165,6 +165,6 @@ class NetworkTest(unittest.TestCase):
         with mock.patch('compose.network.log') as mock_log:
             check_remote_network_config(remote, net)

-        mock_log.warn.assert_called_once_with(mock.ANY)
-        _, args, kwargs = mock_log.warn.mock_calls[0]
+        mock_log.warning.assert_called_once_with(mock.ANY)
+        _, args, kwargs = mock_log.warning.mock_calls[0]
         assert 'label "com.project.touhou.character" has changed' in args[0]


@@ -15,6 +15,8 @@ from compose.config.types import VolumeFromSpec
 from compose.const import COMPOSEFILE_V1 as V1
 from compose.const import COMPOSEFILE_V2_0 as V2_0
 from compose.const import COMPOSEFILE_V2_4 as V2_4
+from compose.const import COMPOSEFILE_V3_7 as V3_7
+from compose.const import DEFAULT_TIMEOUT
 from compose.const import LABEL_SERVICE
 from compose.container import Container
 from compose.errors import OperationFailedError
@@ -765,6 +767,34 @@ class ProjectTest(unittest.TestCase):
         )
         assert project.get_service('web').platform == 'linux/s390x'

+    def test_build_container_operation_with_timeout_func_does_not_mutate_options_with_timeout(self):
+        config_data = Config(
+            version=V3_7,
+            services=[
+                {'name': 'web', 'image': 'busybox:latest'},
+                {'name': 'db', 'image': 'busybox:latest', 'stop_grace_period': '1s'},
+            ],
+            networks={}, volumes={}, secrets=None, configs=None,
+        )
+        project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
+
+        stop_op = project.build_container_operation_with_timeout_func('stop', options={})
+
+        web_container = mock.create_autospec(Container, service='web')
+        db_container = mock.create_autospec(Container, service='db')
+
+        # `stop_grace_period` is not set for the 'web' service,
+        # so it is stopped with the default timeout.
+        stop_op(web_container)
+        web_container.stop.assert_called_once_with(timeout=DEFAULT_TIMEOUT)
+
+        # `stop_grace_period` is set for the 'db' service,
+        # so it is stopped with that timeout, and the value is
+        # not clobbered by the previous call's default.
+        stop_op(db_container)
+        db_container.stop.assert_called_once_with(timeout=1)
+
     @mock.patch('compose.parallel.ParallelStreamWriter._write_noansi')
     def test_error_parallel_pull(self, mock_write):
         project = Project.from_config(
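The test above pins down a shared-state pitfall: if the per-container stop operation wrote its computed timeout back into the shared options dict, the first container's grace period would leak into every subsequent call. A sketch of the safe closure pattern (names are illustrative, not Compose internals):

    DEFAULT_TIMEOUT = 10

    def build_stop_op(shared_options):
        def stop_op(container):
            local = dict(shared_options)  # copy per call; never mutate the shared dict
            grace = getattr(container, 'stop_grace_period', None)
            local.setdefault('timeout', grace if grace is not None else DEFAULT_TIMEOUT)
            container.stop(**local)
        return stop_op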


@@ -333,7 +333,7 @@ class ServiceTest(unittest.TestCase):
         assert service.options['environment'] == environment

         assert opts['labels'][LABEL_CONFIG_HASH] == \
-            '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa'
+            '689149e6041a85f6fb4945a2146a497ed43c8a5cbd8991753d875b165f1b4de4'
         assert opts['environment'] == ['also=real']

     def test_get_container_create_options_sets_affinity_with_binds(self):
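The expected LABEL_CONFIG_HASH value changes because the hash covers the full service config dict, and this commit adds a 'secrets' key to it (see the hunks below). Compose derives such hashes from a canonical JSON dump, roughly:

    import hashlib
    import json

    def config_hash(config_dict):
        # sorted keys make the dump canonical, so equal configs hash equally
        dump = json.dumps(config_dict, sort_keys=True, separators=(',', ':'))
        return hashlib.sha256(dump.encode('utf8')).hexdigest()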
@@ -516,8 +516,8 @@ class ServiceTest(unittest.TestCase):
         with mock.patch('compose.service.log', autospec=True) as mock_log:
             service.create_container()
-            assert mock_log.warn.called
-            _, args, _ = mock_log.warn.mock_calls[0]
+            assert mock_log.warning.called
+            _, args, _ = mock_log.warning.mock_calls[0]
         assert 'was built because it did not already exist' in args[0]

         assert self.mock_client.build.call_count == 1
@@ -546,7 +546,7 @@ class ServiceTest(unittest.TestCase):
         with mock.patch('compose.service.log', autospec=True) as mock_log:
             service.ensure_image_exists(do_build=BuildAction.force)

-        assert not mock_log.warn.called
+        assert not mock_log.warning.called
         assert self.mock_client.build.call_count == 1
         self.mock_client.build.call_args[1]['tag'] == 'default_foo'
@@ -676,6 +676,7 @@ class ServiceTest(unittest.TestCase):
             'options': {'image': 'example.com/foo'},
             'links': [('one', 'one')],
             'net': 'other',
+            'secrets': [],
             'networks': {'default': None},
             'volumes_from': [('two', 'rw')],
         }
@@ -698,6 +699,7 @@ class ServiceTest(unittest.TestCase):
             'options': {'image': 'example.com/foo'},
             'links': [],
             'networks': {},
+            'secrets': [],
             'net': 'aaabbb',
             'volumes_from': [],
         }
@@ -845,13 +847,13 @@ class ServiceTest(unittest.TestCase):
             ports=["8080:80"])
         service.scale(0)
-        assert not mock_log.warn.called
+        assert not mock_log.warning.called

         service.scale(1)
-        assert not mock_log.warn.called
+        assert not mock_log.warning.called

         service.scale(2)
-        mock_log.warn.assert_called_once_with(
+        mock_log.warning.assert_called_once_with(
             'The "{}" service specifies a port on the host. If multiple containers '
             'for this service are created on a single host, the port will clash.'.format(name))
@@ -1389,7 +1391,7 @@ class ServiceVolumesTest(unittest.TestCase):
         with mock.patch('compose.service.log', autospec=True) as mock_log:
             warn_on_masked_volume(volumes_option, container_volumes, service)

-        assert not mock_log.warn.called
+        assert not mock_log.warning.called

     def test_warn_on_masked_volume_when_masked(self):
         volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
@@ -1402,7 +1404,7 @@ class ServiceVolumesTest(unittest.TestCase):
         with mock.patch('compose.service.log', autospec=True) as mock_log:
             warn_on_masked_volume(volumes_option, container_volumes, service)

-        mock_log.warn.assert_called_once_with(mock.ANY)
+        mock_log.warning.assert_called_once_with(mock.ANY)

     def test_warn_on_masked_no_warning_with_same_path(self):
         volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
@@ -1412,7 +1414,7 @@ class ServiceVolumesTest(unittest.TestCase):
         with mock.patch('compose.service.log', autospec=True) as mock_log:
             warn_on_masked_volume(volumes_option, container_volumes, service)

-        assert not mock_log.warn.called
+        assert not mock_log.warning.called

     def test_warn_on_masked_no_warning_with_container_only_option(self):
         volumes_option = [VolumeSpec(None, '/path', 'rw')]
@@ -1424,7 +1426,7 @@ class ServiceVolumesTest(unittest.TestCase):
         with mock.patch('compose.service.log', autospec=True) as mock_log:
             warn_on_masked_volume(volumes_option, container_volumes, service)

-        assert not mock_log.warn.called
+        assert not mock_log.warning.called

     def test_create_with_special_volume_mode(self):
         self.mock_client.inspect_image.return_value = {'Id': 'imageid'}


@@ -1,5 +1,5 @@
 [tox]
-envlist = py27,py36,py37,pre-commit
+envlist = py27,py37,pre-commit

 [testenv]
 usedevelop=True