Mirror of https://github.com/docker/compose.git (synced 2025-07-30 00:54:19 +02:00)

Commit c5d5d42158
.circleci/config.yml

@@ -2,7 +2,7 @@ version: 2
 jobs:
   test:
     macos:
-      xcode: "8.3.3"
+      xcode: "9.4.1"
     steps:
       - checkout
       - run:
@@ -13,11 +13,11 @@ jobs:
           command: sudo pip install --upgrade tox==2.1.1
       - run:
           name: unit tests
-          command: tox -e py27,py36 -- tests/unit
+          command: tox -e py27,py36,py37 -- tests/unit

   build-osx-binary:
     macos:
-      xcode: "8.3.3"
+      xcode: "9.4.1"
     steps:
       - checkout
       - run:
@@ -25,18 +25,17 @@ jobs:
           command: sudo pip install --upgrade pip virtualenv
       - run:
           name: setup script
-          command: ./script/setup/osx
+          command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
       - run:
           name: build script
           command: ./script/build/osx
       - store_artifacts:
           path: dist/docker-compose-Darwin-x86_64
           destination: docker-compose-Darwin-x86_64
-      # - deploy:
-      #     name: Deploy binary to bintray
-      #     command: |
-      #       OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh
+      - deploy:
+          name: Deploy binary to bintray
+          command: |
+            OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh


   build-linux-binary:
     machine:
@@ -54,28 +53,6 @@ jobs:
           command: |
             OS_NAME=Linux PKG_NAME=linux ./script/circle/bintray-deploy.sh

-  trigger-osx-binary-deploy:
-    # We use a separate repo to build OSX binaries meant for distribution
-    # with support for OSSX 10.11 (xcode 7). This job triggers a build on
-    # that repo.
-    docker:
-      - image: alpine:3.6
-
-    steps:
-      - run:
-          name: install curl
-          command: apk update && apk add curl
-
-      - run:
-          name: API trigger
-          command: |
-            curl -X POST -H "Content-Type: application/json" -d "{\
-              \"build_parameters\": {\
-                \"COMPOSE_BRANCH\": \"${CIRCLE_BRANCH}\"\
-              }\
-            }" https://circleci.com/api/v1.1/project/github/docker/compose-osx-release?circle-token=${OSX_RELEASE_TOKEN} \
-              > /dev/null
-

 workflows:
   version: 2
@@ -84,9 +61,3 @@ workflows:
       - test
       - build-linux-binary
       - build-osx-binary
-      - trigger-osx-binary-deploy:
-          filters:
-            branches:
-              only:
-                - master
-                - /bump-.*/
.gitignore (vendored)

@@ -13,3 +13,4 @@ compose/GITSHA
 *.swp
 .DS_Store
 .cache
+.idea
CHANGELOG.md

@@ -1,6 +1,66 @@
 Change log
 ==========

+1.23.0 (2018-10-10)
+-------------------
+
+### Important note
+
+The default naming scheme for containers created by Compose in this version
+has changed from `<project>_<service>_<index>` to
+`<project>_<service>_<index>_<slug>`, where `<slug>` is a randomly-generated
+hexadecimal string. Please make sure to update scripts relying on the old
+naming scheme accordingly before upgrading.
+
+### Features
+
+- Logs for containers restarting after a crash will now appear in the output
+  of the `up` and `logs` commands.
+
+- Added `--hash` option to the `docker-compose config` command, allowing users
+  to print a hash string for each service's configuration to facilitate rolling
+  updates.
+
+- Output for the `pull` command now reports status / progress even when pulling
+  multiple images in parallel.
+
+- For images with multiple names, Compose will now attempt to match the one
+  present in the service configuration in the output of the `images` command.
+
+### Bugfixes
+
+- Parallel `run` commands for the same service will no longer fail due to name
+  collisions.
+
+- Fixed an issue where paths longer than 260 characters on Windows clients would
+  cause `docker-compose build` to fail.
+
+- Fixed a bug where attempting to mount `/var/run/docker.sock` with
+  Docker Desktop for Windows would result in failure.
+
+- The `--project-directory` option is now used by Compose to determine where to
+  look for the `.env` file.
+
+- `docker-compose build` no longer fails when attempting to pull an image with
+  credentials provided by the gcloud credential helper.
+
+- Fixed the `--exit-code-from` option in `docker-compose up` to always report
+  the actual exit code even when the watched container isn't the cause of the
+  exit.
+
+- Fixed a bug that caused hash configuration with multiple networks to be
+  inconsistent, causing some services to be unnecessarily restarted.
+
+- Fixed a pipe handling issue when using the containerized version of Compose.
+
+- Fixed a bug causing `external: false` entries in the Compose file to be
+  printed as `external: true` in the output of `docker-compose config`
+
+### Miscellaneous
+
+- The `zsh` completion script has been updated with new options, and no
+  longer suggests container names where service names are expected.
+
 1.22.0 (2018-07-17)
 -------------------

@@ -60,7 +120,7 @@ Change log

 ### Bugfixes

-- Fixed a bug where the ip_range attirbute in IPAM configs was prevented
+- Fixed a bug where the ip_range attribute in IPAM configs was prevented
   from passing validation

 1.21.1 (2018-04-27)
@@ -285,7 +345,7 @@ Change log
   preventing Compose from recovering volume data from previous containers for
   anonymous volumes

-- Added limit for number of simulatenous parallel operations, which should
+- Added limit for number of simultaneous parallel operations, which should
   prevent accidental resource exhaustion of the server. Default is 64 and
   can be configured using the `COMPOSE_PARALLEL_LIMIT` environment variable

@@ -583,7 +643,7 @@ Change log
 ### Bugfixes

 - Volumes specified through the `--volume` flag of `docker-compose run` now
-  complement volumes declared in the service's defintion instead of replacing
+  complement volumes declared in the service's definition instead of replacing
   them

 - Fixed a bug where using multiple Compose files would unset the scale value
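To make the naming-scheme note above concrete: a script that previously matched `<project>_<service>_<index>` container names can be adapted to tolerate both formats. This is a hedged sketch — the helper and regex are illustrative, not part of Compose, and it assumes service names without underscores:

    import re

    # Old scheme: <project>_<service>_<index>
    # New scheme: <project>_<service>_<index>_<slug>, where the slug is the
    # 12-character truncation of a random hex string (see compose/utils.py
    # further down in this diff).
    NAME_RE = re.compile(
        r'^(?P<project>.+)_(?P<service>[^_]+)_(?P<index>\d+)'
        r'(?:_(?P<slug>[0-9a-f]{12}))?$'
    )

    def parse_container_name(name):
        """Return (project, service, index) under either naming scheme."""
        match = NAME_RE.match(name)
        if match is None:
            raise ValueError('not a Compose-managed name: {!r}'.format(name))
        return match.group('project'), match.group('service'), int(match.group('index'))

    assert parse_container_name('myproj_web_1') == ('myproj', 'web', 1)
    assert parse_container_name('myproj_web_1_1a2b3c4d5e6f') == ('myproj', 'web', 1)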
Dockerfile.armhf

@@ -1,55 +1,21 @@
-FROM armhf/debian:wheezy
+FROM python:3.6

 RUN set -ex; \
     apt-get update -qq; \
     apt-get install -y \
         locales \
-        gcc \
-        make \
-        zlib1g \
-        zlib1g-dev \
-        libssl-dev \
-        git \
-        ca-certificates \
         curl \
-        libsqlite3-dev \
-        libbz2-dev \
-    ; \
-    rm -rf /var/lib/apt/lists/*
+        python-dev \
+        git

 RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
+    SHA256=f8de6378dad825b9fd5c3c2f949e791d22f918623c27a72c84fd6975a0e5d0a2; \
+    echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
     tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
     mv docker /usr/local/bin/docker && \
     chmod +x /usr/local/bin/docker && \
     rm dockerbins.tgz

-# Build Python 2.7.13 from source
-RUN set -ex; \
-    curl -L https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz | tar -xz; \
-    cd Python-2.7.13; \
-    ./configure --enable-shared; \
-    make; \
-    make install; \
-    cd ..; \
-    rm -rf /Python-2.7.13
-
-# Build python 3.6 from source
-RUN set -ex; \
-    curl -L https://www.python.org/ftp/python/3.6.4/Python-3.6.4.tgz | tar -xz; \
-    cd Python-3.6.4; \
-    ./configure --enable-shared; \
-    make; \
-    make install; \
-    cd ..; \
-    rm -rf /Python-3.6.4
-
-# Make libpython findable
-ENV LD_LIBRARY_PATH /usr/local/lib
-
-# Install pip
-RUN set -ex; \
-    curl -L https://bootstrap.pypa.io/get-pip.py | python
-
 # Python3 requires a valid locale
 RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
 ENV LANG en_US.UTF-8
@@ -70,4 +36,4 @@ RUN tox --notest
 ADD . /code/
 RUN chown -R user /code/

-ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"]
+ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]
Dockerfile.run

@@ -4,7 +4,7 @@ ENV GLIBC 2.27-r0
 ENV DOCKERBINS_SHA 1270dce1bd7e1838d62ae21d2505d87f16efc1d9074645571daaefdfd0c14054

 RUN apk update && apk add --no-cache openssl ca-certificates curl libgcc && \
-    curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub && \
+    curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
     curl -fsSL -o glibc-$GLIBC.apk https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
     apk add --no-cache glibc-$GLIBC.apk && \
     ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
Jenkinsfile (vendored)

@@ -74,10 +74,11 @@ buildImage()
 def testMatrix = [failFast: true]
 def docker_versions = get_versions(2)

-for (int i = 0 ;i < docker_versions.length ; i++) {
+for (int i = 0; i < docker_versions.length; i++) {
     def dockerVersion = docker_versions[i]
     testMatrix["${dockerVersion}_py27"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py27"])
     testMatrix["${dockerVersion}_py36"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py36"])
+    testMatrix["${dockerVersion}_py37"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py37"])
 }

 parallel(testMatrix)
appveyor.yml

@@ -10,7 +10,7 @@ install:
 build: false

 test_script:
-  - "tox -e py27,py36 -- tests/unit"
+  - "tox -e py27,py36,py37 -- tests/unit"
   - ps: ".\\script\\build\\windows.ps1"

 artifacts:
compose/__init__.py

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals

-__version__ = '1.22.0'
+__version__ = '1.23.0-rc1'
compose/cli/log_printer.py

@@ -210,10 +210,15 @@ def start_producer_thread(thread_args):


 def watch_events(thread_map, event_stream, presenters, thread_args):
+    crashed_containers = set()
     for event in event_stream:
         if event['action'] == 'stop':
             thread_map.pop(event['id'], None)

+        if event['action'] == 'die':
+            thread_map.pop(event['id'], None)
+            crashed_containers.add(event['id'])
+
         if event['action'] != 'start':
             continue

@@ -223,6 +228,11 @@ def watch_events(thread_map, event_stream, presenters, thread_args):
             # Container was stopped and started, we need a new thread
             thread_map.pop(event['id'], None)

+        # Container crashed so we should reattach to it
+        if event['id'] in crashed_containers:
+            event['container'].attach_log_stream()
+            crashed_containers.remove(event['id'])
+
         thread_map[event['id']] = build_thread(
             event['container'],
             next(presenters),
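The `die`/`start` bookkeeping above is what makes restarting containers reappear in `up`/`logs` output (per the changelog). A self-contained sketch of the control flow, using stub objects in place of Compose's real event stream and containers (all names here are illustrative only):

    class StubContainer(object):
        def attach_log_stream(self):
            print('reattached')

    def watch(events):
        crashed, threads = set(), {}
        for event in events:
            if event['action'] in ('stop', 'die'):
                threads.pop(event['id'], None)
            if event['action'] == 'die':
                crashed.add(event['id'])
            if event['action'] != 'start':
                continue
            # A 'start' for a container previously seen to die: reattach logs
            if event['id'] in crashed:
                event['container'].attach_log_stream()
                crashed.remove(event['id'])
            threads[event['id']] = object()  # stand-in for build_thread(...)

    watch([
        {'action': 'die', 'id': 'abc', 'container': StubContainer()},
        {'action': 'start', 'id': 'abc', 'container': StubContainer()},  # prints 'reattached'
    ])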
compose/cli/main.py

@@ -238,11 +238,14 @@ class TopLevelCommand(object):
         version            Show the Docker-Compose version information
     """

-    def __init__(self, project, project_dir='.', options=None):
+    def __init__(self, project, options=None):
         self.project = project
-        self.project_dir = '.'
         self.toplevel_options = options or {}

+    @property
+    def project_dir(self):
+        return self.toplevel_options.get('--project-directory') or '.'
+
     def build(self, options):
         """
         Build or rebuild services.
@@ -260,6 +263,7 @@ class TopLevelCommand(object):
             --pull              Always attempt to pull a newer version of the image.
             -m, --memory MEM    Sets memory limit for the build container.
             --build-arg key=val Set build-time variables for services.
+            --parallel          Build images in parallel.
         """
         service_names = options['SERVICE']
         build_args = options.get('--build-arg', None)
@@ -280,6 +284,7 @@ class TopLevelCommand(object):
             memory=options.get('--memory'),
             build_args=build_args,
             gzip=options.get('--compress', False),
+            parallel_build=options.get('--parallel', False),
         )

     def bundle(self, options):
@@ -326,7 +331,9 @@ class TopLevelCommand(object):
                 anything.
             --services               Print the service names, one per line.
             --volumes                Print the volume names, one per line.
+            --hash="*"               Print the service config hash, one per line.
+                                     Set "service1,service2" for a list of specified services
+                                     or use the wildcard symbol to display all services
         """

         compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
@@ -348,6 +355,15 @@ class TopLevelCommand(object):
             print('\n'.join(volume for volume in compose_config.volumes))
             return

+        if options['--hash'] is not None:
+            h = options['--hash']
+            self.project = project_from_options('.', self.toplevel_options)
+            services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
+            with errors.handle_connection_errors(self.project.client):
+                for service in self.project.get_services(services):
+                    print('{} {}'.format(service.name, service.config_hash))
+            return
+
         print(serialize_config(compose_config, image_digests))

     def create(self, options):
@@ -552,31 +568,43 @@ class TopLevelCommand(object):
         if options['--quiet']:
             for image in set(c.image for c in containers):
                 print(image.split(':')[1])
-        else:
-            headers = [
-                'Container',
-                'Repository',
-                'Tag',
-                'Image Id',
-                'Size'
-            ]
-            rows = []
-            for container in containers:
-                image_config = container.image_config
-                repo_tags = (
-                    image_config['RepoTags'][0].rsplit(':', 1) if image_config['RepoTags']
-                    else ('<none>', '<none>')
-                )
-                image_id = image_config['Id'].split(':')[1][:12]
-                size = human_readable_file_size(image_config['Size'])
-                rows.append([
-                    container.name,
-                    repo_tags[0],
-                    repo_tags[1],
-                    image_id,
-                    size
-                ])
-            print(Formatter().table(headers, rows))
+            return
+
+        def add_default_tag(img_name):
+            if ':' not in img_name.split('/')[-1]:
+                return '{}:latest'.format(img_name)
+            return img_name
+
+        headers = [
+            'Container',
+            'Repository',
+            'Tag',
+            'Image Id',
+            'Size'
+        ]
+        rows = []
+        for container in containers:
+            image_config = container.image_config
+            service = self.project.get_service(container.service)
+            index = 0
+            img_name = add_default_tag(service.image_name)
+            if img_name in image_config['RepoTags']:
+                index = image_config['RepoTags'].index(img_name)
+            repo_tags = (
+                image_config['RepoTags'][index].rsplit(':', 1) if image_config['RepoTags']
+                else ('<none>', '<none>')
+            )
+            image_id = image_config['Id'].split(':')[1][:12]
+            size = human_readable_file_size(image_config['Size'])
+            rows.append([
+                container.name,
+                repo_tags[0],
+                repo_tags[1],
+                image_id,
+                size
+            ])
+        print(Formatter().table(headers, rows))

     def kill(self, options):
         """
@@ -1085,12 +1113,15 @@ class TopLevelCommand(object):
             )

         self.project.stop(service_names=service_names, timeout=timeout)
+        if exit_value_from:
+            exit_code = compute_service_exit_code(exit_value_from, attached_containers)
+
         sys.exit(exit_code)

     @classmethod
     def version(cls, options):
         """
-        Show version informations
+        Show version information

         Usage: version [--short]

@@ -1103,33 +1134,33 @@ class TopLevelCommand(object):
         print(get_version_info('full'))


+def compute_service_exit_code(exit_value_from, attached_containers):
+    candidates = list(filter(
+        lambda c: c.service == exit_value_from,
+        attached_containers))
+    if not candidates:
+        log.error(
+            'No containers matching the spec "{0}" '
+            'were run.'.format(exit_value_from)
+        )
+        return 2
+    if len(candidates) > 1:
+        exit_values = filter(
+            lambda e: e != 0,
+            [c.inspect()['State']['ExitCode'] for c in candidates]
+        )
+
+        return exit_values[0]
+    return candidates[0].inspect()['State']['ExitCode']
+
+
 def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
     exit_code = 0
-    if exit_value_from:
-        candidates = list(filter(
-            lambda c: c.service == exit_value_from,
-            attached_containers))
-        if not candidates:
-            log.error(
-                'No containers matching the spec "{0}" '
-                'were run.'.format(exit_value_from)
-            )
-            exit_code = 2
-        elif len(candidates) > 1:
-            exit_values = filter(
-                lambda e: e != 0,
-                [c.inspect()['State']['ExitCode'] for c in candidates]
-            )
-
-            exit_code = exit_values[0]
-        else:
-            exit_code = candidates[0].inspect()['State']['ExitCode']
-    else:
-        for e in all_containers:
-            if (not e.is_running and cascade_starter == e.name):
-                if not e.exit_code == 0:
-                    exit_code = e.exit_code
-                break
+    for e in all_containers:
+        if (not e.is_running and cascade_starter == e.name):
+            if not e.exit_code == 0:
+                exit_code = e.exit_code
+            break

     return exit_code
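One caveat in the `compute_service_exit_code` helper added above: on Python 3, `filter()` returns a lazy iterator, so `exit_values[0]` would raise a `TypeError` when more than one candidate container matches (and on Python 2 it raises `IndexError` when every exit code is zero). A defensive variant, offered as a hedged sketch rather than the project's code:

    def first_nonzero_exit_code(exit_codes):
        # Materializing a list makes indexing safe on both Python 2 and 3,
        # and an explicit fallback covers the all-zero case.
        nonzero = [code for code in exit_codes if code != 0]
        return nonzero[0] if nonzero else 0

    assert first_nonzero_exit_code([0, 137, 1]) == 137
    assert first_nonzero_exit_code([0, 0]) == 0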
compose/config/serialize.py

@@ -78,7 +78,7 @@ def denormalize_config(config, image_digests=None):
                 config.version >= V3_0 and config.version < v3_introduced_name_key(key)):
             del conf['name']
         elif 'external' in conf:
-            conf['external'] = True
+            conf['external'] = bool(conf['external'])

         if 'attachable' in conf and config.version < V3_2:
             # For compatibility mode, this option is invalid in v2
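This one-line change is the fix for the `external: false` round-trip bug in the changelog: the old code forced the flag to `True` whenever the key was present. A minimal sketch of the difference, using a bare dict in place of a parsed network/volume config:

    conf = {'external': False}

    # Old behaviour: presence of the key forced the value to True.
    old = dict(conf, external=True)
    # New behaviour: the configured truthiness survives serialization.
    new = dict(conf, external=bool(conf['external']))

    assert old['external'] is True
    assert new['external'] is False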
compose/config/types.py

@@ -136,6 +136,20 @@ def normalize_path_for_engine(path):
     return path.replace('\\', '/')


+def normpath(path, win_host=False):
+    """ Custom path normalizer that handles Compose-specific edge cases like
+        UNIX paths on Windows hosts and vice-versa. """
+
+    sysnorm = ntpath.normpath if win_host else os.path.normpath
+    # If a path looks like a UNIX absolute path on Windows, it probably is;
+    # we'll need to revert the backslashes to forward slashes after normalization
+    flip_slashes = path.startswith('/') and IS_WINDOWS_PLATFORM
+    path = sysnorm(path)
+    if flip_slashes:
+        path = path.replace('\\', '/')
+    return path
+
+
 class MountSpec(object):
     options_map = {
         'volume': {
@@ -152,12 +166,11 @@ class MountSpec(object):

     @classmethod
     def parse(cls, mount_dict, normalize=False, win_host=False):
-        normpath = ntpath.normpath if win_host else os.path.normpath
         if mount_dict.get('source'):
             if mount_dict['type'] == 'tmpfs':
                 raise ConfigurationError('tmpfs mounts can not specify a source')

-            mount_dict['source'] = normpath(mount_dict['source'])
+            mount_dict['source'] = normpath(mount_dict['source'], win_host)
             if normalize:
                 mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])

@@ -247,7 +260,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
         else:
             external = parts[0]
             parts = separate_next_section(parts[1])
-            external = ntpath.normpath(external)
+            external = normpath(external, True)
         internal = parts[0]
         if len(parts) > 1:
             if ':' in parts[1]:
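The docker.sock fix in the changelog comes down to this `normpath`: previously `ntpath.normpath` would rewrite `/var/run/docker.sock` to `\var\run\docker.sock` on Windows hosts. A self-contained sketch of the same logic — `ntpath` is used explicitly here so the example runs on any platform, where the real function consults `win_host` and `IS_WINDOWS_PLATFORM`:

    import ntpath

    def normpath_sketch(path, on_windows=True):
        # A UNIX-looking absolute path on a Windows host should keep its
        # forward slashes, so flip the separators back after normalizing.
        flip_slashes = path.startswith('/') and on_windows
        path = ntpath.normpath(path)
        if flip_slashes:
            path = path.replace('\\', '/')
        return path

    assert normpath_sketch('/var/run/../run/docker.sock') == '/var/run/docker.sock'
    assert normpath_sketch('C:\\Users\\.\\me') == 'C:\\Users\\me'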
compose/const.py

@@ -15,12 +15,14 @@ LABEL_PROJECT = 'com.docker.compose.project'
 LABEL_SERVICE = 'com.docker.compose.service'
 LABEL_NETWORK = 'com.docker.compose.network'
 LABEL_VERSION = 'com.docker.compose.version'
+LABEL_SLUG = 'com.docker.compose.slug'
 LABEL_VOLUME = 'com.docker.compose.volume'
 LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
 NANOCPUS_SCALE = 1000000000
 PARALLEL_LIMIT = 64

 SECRETS_PATH = '/run/secrets'
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

 COMPOSEFILE_V1 = ComposeVersion('1')
 COMPOSEFILE_V2_0 = ComposeVersion('2.0')
compose/container.py

@@ -9,7 +9,9 @@ from docker.errors import ImageNotFound
 from .const import LABEL_CONTAINER_NUMBER
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
 from .const import LABEL_VERSION
+from .utils import truncate_id
 from .version import ComposeVersion


@@ -80,7 +82,7 @@ class Container(object):
     @property
     def name_without_project(self):
         if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
-            return '{0}_{1}'.format(self.service, self.number)
+            return '{0}_{1}{2}'.format(self.service, self.number, '_' + self.slug if self.slug else '')
         else:
             return self.name

@@ -92,6 +94,14 @@ class Container(object):
                 self.short_id, LABEL_CONTAINER_NUMBER))
         return int(number)

+    @property
+    def slug(self):
+        return truncate_id(self.full_slug)
+
+    @property
+    def full_slug(self):
+        return self.labels.get(LABEL_SLUG)
+
     @property
     def ports(self):
         self.inspect_if_not_inspected()
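The `full_slug` property returns the raw hex string stored in the `com.docker.compose.slug` label, while `slug` is its 12-character truncation used in names. A sketch of `truncate_id` (defined in compose/utils.py later in this diff), including the `sha256:`-style prefix handling it shares with image IDs:

    def truncate_id(value):
        # Drop a 'sha256:'-style prefix, then keep the first 12 characters.
        if ':' in value:
            value = value[value.index(':') + 1:]
        return value[:12] if len(value) > 12 else value

    full_slug = '1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d'   # as stored in the label
    assert truncate_id(full_slug) == '1a2b3c4d5e6f'  # as shown in container names
    assert truncate_id('sha256:abcdef1234567890') == 'abcdef123456'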
compose/config/config.py

@@ -323,7 +323,12 @@ def get_networks(service_dict, network_definitions):
                 'Service "{}" uses an undefined network "{}"'
                 .format(service_dict['name'], name))

-    return OrderedDict(sorted(
-        networks.items(),
-        key=lambda t: t[1].get('priority') or 0, reverse=True
-    ))
+    if any([v.get('priority') for v in networks.values()]):
+        return OrderedDict(sorted(
+            networks.items(),
+            key=lambda t: t[1].get('priority') or 0, reverse=True
+        ))
+    else:
+        # Ensure Compose will pick a consistent primary network if no
+        # priority is set
+        return OrderedDict(sorted(networks.items(), key=lambda t: t[0]))
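This ordering change backs the changelog fix for inconsistent config hashes with multiple networks: when no priorities are set, a stable sort on unordered dict items left the result at the mercy of dict iteration order, so the hash (and the chosen primary network) could vary between runs. A runnable sketch of the new behaviour:

    from collections import OrderedDict

    def order_networks(networks):
        # With any explicit priority, sort by priority (highest first);
        # otherwise sort by name so the primary network is deterministic.
        if any(v.get('priority') for v in networks.values()):
            return OrderedDict(sorted(
                networks.items(),
                key=lambda t: t[1].get('priority') or 0, reverse=True
            ))
        return OrderedDict(sorted(networks.items(), key=lambda t: t[0]))

    assert list(order_networks({'b': {}, 'a': {}})) == ['a', 'b']
    assert list(order_networks({'a': {}, 'b': {'priority': 1}})) == ['b', 'a']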
compose/parallel.py

@@ -313,6 +313,13 @@ class ParallelStreamWriter(object):
         self._write_ansi(msg, obj_index, color_func(status))


+def get_stream_writer():
+    instance = ParallelStreamWriter.instance
+    if instance is None:
+        raise RuntimeError('ParallelStreamWriter has not yet been instantiated')
+    return instance
+
+
 def parallel_operation(containers, operation, options, message):
     parallel_execute(
         containers,
compose/progress_stream.py

@@ -19,12 +19,11 @@ def write_to_stream(s, stream):
 def stream_output(output, stream):
     is_terminal = hasattr(stream, 'isatty') and stream.isatty()
     stream = utils.get_output_stream(stream)
-    all_events = []
     lines = {}
     diff = 0

     for event in utils.json_stream(output):
-        all_events.append(event)
+        yield event
         is_progress_event = 'progress' in event or 'progressDetail' in event

         if not is_progress_event:
@@ -57,8 +56,6 @@ def stream_output(output, stream):

         stream.flush()

-    return all_events
-

 def print_output_event(event, stream, is_terminal):
     if 'errorDetail' in event:
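Turning `stream_output` into a generator is what lets `Project.pull` (below) render per-service progress while images download, instead of waiting for a completed list of events. One consequence worth noting: nothing happens until the generator is consumed, so callers that still need the full event list — like `Service.build` later in this diff — now wrap the call in `list(...)`. A minimal sketch of the pattern:

    def stream_events(raw_events):
        # Forward each event to the caller as it is rendered, rather than
        # buffering everything and returning a list at the end.
        for event in raw_events:
            # ... writing progress to the output stream would happen here ...
            yield event

    events = stream_events([{'status': 'Pulling'}, {'status': 'Downloading'}])
    all_events = list(events)  # eager callers materialize explicitly
    assert len(all_events) == 2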
compose/project.py

@@ -31,7 +31,6 @@ from .service import ConvergenceStrategy
 from .service import NetworkMode
 from .service import PidMode
 from .service import Service
-from .service import ServiceName
 from .service import ServiceNetworkMode
 from .service import ServicePidMode
 from .utils import microseconds_from_time_nano
@@ -198,25 +197,6 @@ class Project(object):
             service.remove_duplicate_containers()
         return services

-    def get_scaled_services(self, services, scale_override):
-        """
-        Returns a list of this project's services as scaled ServiceName objects.
-
-        services: a list of Service objects
-        scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
-        """
-        service_names = []
-        for service in services:
-            if service.name in scale_override:
-                scale = scale_override[service.name]
-            else:
-                scale = service.scale_num
-
-            for i in range(1, scale + 1):
-                service_names.append(ServiceName(self.name, service.name, i))
-
-        return service_names
-
     def get_links(self, service_dict):
         links = []
         if 'links' in service_dict:
@@ -372,13 +352,36 @@ class Project(object):
         return containers

     def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
-              build_args=None, gzip=False):
+              build_args=None, gzip=False, parallel_build=False):
+
+        services = []
         for service in self.get_services(service_names):
             if service.can_be_built():
-                service.build(no_cache, pull, force_rm, memory, build_args, gzip)
+                services.append(service)
             else:
                 log.info('%s uses an image, skipping' % service.name)

+        def build_service(service):
+            service.build(no_cache, pull, force_rm, memory, build_args, gzip)
+
+        if parallel_build:
+            _, errors = parallel.parallel_execute(
+                services,
+                build_service,
+                operator.attrgetter('name'),
+                'Building',
+                limit=5,
+            )
+            if len(errors):
+                combined_errors = '\n'.join([
+                    e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
+                ])
+                raise ProjectError(combined_errors)
+
+        else:
+            for service in services:
+                build_service(service)
+
     def create(
             self,
             service_names=None,
@@ -471,7 +474,6 @@ class Project(object):
             svc.ensure_image_exists(do_build=do_build, silent=silent)
         plans = self._get_convergence_plans(
             services, strategy, always_recreate_deps=always_recreate_deps)
-        scaled_services = self.get_scaled_services(services, scale_override)

         def do(service):

@@ -482,7 +484,6 @@ class Project(object):
                 scale_override=scale_override.get(service.name),
                 rescale=rescale,
                 start=start,
-                project_services=scaled_services,
                 reset_container_image=reset_container_image,
                 renew_anonymous_volumes=renew_anonymous_volumes,
             )
@@ -548,16 +549,37 @@ class Project(object):
     def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
              include_deps=False):
         services = self.get_services(service_names, include_deps)
+        msg = not silent and 'Pulling' or None

         if parallel_pull:
             def pull_service(service):
-                service.pull(ignore_pull_failures, True)
+                strm = service.pull(ignore_pull_failures, True, stream=True)
+                writer = parallel.get_stream_writer()
+
+                def trunc(s):
+                    if len(s) > 35:
+                        return s[:33] + '...'
+                    return s
+
+                for event in strm:
+                    if 'status' not in event:
+                        continue
+                    status = event['status'].lower()
+                    if 'progressDetail' in event:
+                        detail = event['progressDetail']
+                        if 'current' in detail and 'total' in detail:
+                            percentage = float(detail['current']) / float(detail['total'])
+                            status = '{} ({:.1%})'.format(status, percentage)
+
+                    writer.write(
+                        msg, service.name, trunc(status), lambda s: s
+                    )
+
             _, errors = parallel.parallel_execute(
                 services,
                 pull_service,
                 operator.attrgetter('name'),
-                not silent and 'Pulling' or None,
+                msg,
                 limit=5,
             )
             if len(errors):
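The status lines rendered by `pull_service` above combine the Docker daemon's `status` field with layer progress from `progressDetail`, then truncate so the parallel writer's columns stay aligned. A standalone sketch of that formatting:

    def format_pull_status(event):
        # Lower-case the daemon status and append a percentage when the
        # layer's current/total byte counts are known; cap at 35 characters.
        status = event['status'].lower()
        detail = event.get('progressDetail', {})
        if 'current' in detail and 'total' in detail:
            percentage = float(detail['current']) / float(detail['total'])
            status = '{} ({:.1%})'.format(status, percentage)
        return status if len(status) <= 35 else status[:33] + '...'

    evt = {'status': 'Downloading', 'progressDetail': {'current': 5, 'total': 40}}
    assert format_pull_status(evt) == 'downloading (12.5%)'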
compose/service.py

@@ -40,8 +40,10 @@ from .const import LABEL_CONTAINER_NUMBER
 from .const import LABEL_ONE_OFF
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
 from .const import LABEL_VERSION
 from .const import NANOCPUS_SCALE
+from .const import WINDOWS_LONGPATH_PREFIX
 from .container import Container
 from .errors import HealthCheckFailed
 from .errors import NoHealthCheckConfigured
@@ -49,9 +51,11 @@ from .errors import OperationFailedError
 from .parallel import parallel_execute
 from .progress_stream import stream_output
 from .progress_stream import StreamOutputError
+from .utils import generate_random_id
 from .utils import json_hash
 from .utils import parse_bytes
 from .utils import parse_seconds_float
+from .utils import truncate_id


 log = logging.getLogger(__name__)
@@ -122,7 +126,7 @@ class NoSuchImageError(Exception):
     pass


-ServiceName = namedtuple('ServiceName', 'project service number')
+ServiceName = namedtuple('ServiceName', 'project service number slug')


 ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
@@ -219,7 +223,6 @@ class Service(object):
         """Return a :class:`compose.container.Container` for this service. The
         container must be active, and match `number`.
         """
-
         for container in self.containers(labels=['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]):
             return container

@@ -425,27 +428,33 @@ class Service(object):

         return has_diverged

-    def _execute_convergence_create(self, scale, detached, start, project_services=None):
-        i = self._next_container_number()
-
-        def create_and_start(service, n):
-            container = service.create_container(number=n, quiet=True)
-            if not detached:
-                container.attach_log_stream()
-            if start:
-                self.start_container(container)
-            return container
-
-        containers, errors = parallel_execute(
-            [ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
-            lambda service_name: create_and_start(self, service_name.number),
-            lambda service_name: self.get_container_name(service_name.service, service_name.number),
-            "Creating"
-        )
-        for error in errors.values():
-            raise OperationFailedError(error)
-
-        return containers
+    def _execute_convergence_create(self, scale, detached, start):
+
+        i = self._next_container_number()
+
+        def create_and_start(service, n):
+            container = service.create_container(number=n, quiet=True)
+            if not detached:
+                container.attach_log_stream()
+            if start:
+                self.start_container(container)
+            return container
+
+        containers, errors = parallel_execute(
+            [
+                ServiceName(self.project, self.name, index, generate_random_id())
+                for index in range(i, i + scale)
+            ],
+            lambda service_name: create_and_start(self, service_name.number),
+            lambda service_name: self.get_container_name(
+                service_name.service, service_name.number, service_name.slug
+            ),
+            "Creating"
+        )
+        for error in errors.values():
+            raise OperationFailedError(error)
+
+        return containers

     def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                       renew_anonymous_volumes):
@@ -508,8 +517,8 @@ class Service(object):

     def execute_convergence_plan(self, plan, timeout=None, detached=False,
                                  start=True, scale_override=None,
-                                 rescale=True, project_services=None,
-                                 reset_container_image=False, renew_anonymous_volumes=False):
+                                 rescale=True, reset_container_image=False,
+                                 renew_anonymous_volumes=False):
         (action, containers) = plan
         scale = scale_override if scale_override is not None else self.scale_num
         containers = sorted(containers, key=attrgetter('number'))
@@ -518,7 +527,7 @@ class Service(object):

         if action == 'create':
             return self._execute_convergence_create(
-                scale, detached, start, project_services
+                scale, detached, start
             )

         # The create action needs always needs an initial scale, but otherwise,
@@ -568,7 +577,7 @@ class Service(object):
             container.rename_to_tmp_name()
         new_container = self.create_container(
             previous_container=container if not renew_anonymous_volumes else None,
-            number=container.labels.get(LABEL_CONTAINER_NUMBER),
+            number=container.number,
             quiet=True,
         )
         if attach_logs:
@@ -656,9 +665,15 @@ class Service(object):
         return json_hash(self.config_dict())

     def config_dict(self):
+        def image_id():
+            try:
+                return self.image()['Id']
+            except NoSuchImageError:
+                return None
+
         return {
             'options': self.options,
-            'image_id': self.image()['Id'],
+            'image_id': image_id(),
             'links': self.get_link_names(),
             'net': self.network_mode.id,
             'networks': self.networks,
@@ -717,8 +732,6 @@ class Service(object):
     def get_volumes_from_names(self):
         return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]

-    # TODO: this would benefit from github.com/docker/docker/pull/14699
-    # to remove the need to inspect every container
     def _next_container_number(self, one_off=False):
         containers = itertools.chain(
             self._fetch_containers(
@@ -807,6 +820,7 @@ class Service(object):
             one_off=False,
             previous_container=None):
         add_config_hash = (not one_off and not override_options)
+        slug = generate_random_id() if previous_container is None else previous_container.full_slug

         container_options = dict(
             (k, self.options[k])
@@ -815,7 +829,7 @@ class Service(object):
         container_options.update(override_options)

         if not container_options.get('name'):
-            container_options['name'] = self.get_container_name(self.name, number, one_off)
+            container_options['name'] = self.get_container_name(self.name, number, slug, one_off)

         container_options.setdefault('detach', True)

@@ -867,7 +881,9 @@ class Service(object):
             container_options.get('labels', {}),
             self.labels(one_off=one_off),
             number,
-            self.config_hash if add_config_hash else None)
+            self.config_hash if add_config_hash else None,
+            slug
+        )

         # Delete options which are only used in HostConfig
         for key in HOST_CONFIG_KEYS:
@@ -1033,12 +1049,7 @@ class Service(object):
         for k, v in self._parse_proxy_config().items():
             build_args.setdefault(k, v)

-        # python2 os.stat() doesn't support unicode on some UNIX, so we
-        # encode it to a bytestring to be safe
-        path = build_opts.get('context')
-        if not six.PY3 and not IS_WINDOWS_PLATFORM:
-            path = path.encode('utf8')
+        path = rewrite_build_path(build_opts.get('context'))

         if self.platform and version_lt(self.client.api_version, '1.35'):
             raise OperationFailedError(
                 'Impossible to perform platform-targeted builds for API version < 1.35'
@@ -1068,7 +1079,7 @@ class Service(object):
         )

         try:
-            all_events = stream_output(build_output, sys.stdout)
+            all_events = list(stream_output(build_output, sys.stdout))
         except StreamOutputError as e:
             raise BuildError(self, six.text_type(e))

@@ -1105,12 +1116,12 @@ class Service(object):
     def custom_container_name(self):
         return self.options.get('container_name')

-    def get_container_name(self, service_name, number, one_off=False):
+    def get_container_name(self, service_name, number, slug, one_off=False):
         if self.custom_container_name and not one_off:
             return self.custom_container_name

         container_name = build_container_name(
-            self.project, service_name, number, one_off,
+            self.project, service_name, number, slug, one_off,
         )
         ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
         if container_name in ext_links_origins:
@@ -1162,7 +1173,23 @@ class Service(object):

         return any(has_host_port(binding) for binding in self.options.get('ports', []))

-    def pull(self, ignore_pull_failures=False, silent=False):
+    def _do_pull(self, repo, pull_kwargs, silent, ignore_pull_failures):
+        try:
+            output = self.client.pull(repo, **pull_kwargs)
+            if silent:
+                with open(os.devnull, 'w') as devnull:
+                    for event in stream_output(output, devnull):
+                        yield event
+            else:
+                for event in stream_output(output, sys.stdout):
+                    yield event
+        except (StreamOutputError, NotFound) as e:
+            if not ignore_pull_failures:
+                raise
+            else:
+                log.error(six.text_type(e))
+
+    def pull(self, ignore_pull_failures=False, silent=False, stream=False):
         if 'image' not in self.options:
             return

@@ -1179,20 +1206,11 @@ class Service(object):
             raise OperationFailedError(
                 'Impossible to perform platform-targeted pulls for API version < 1.35'
             )
-        try:
-            output = self.client.pull(repo, **kwargs)
-            if silent:
-                with open(os.devnull, 'w') as devnull:
-                    return progress_stream.get_digest_from_pull(
-                        stream_output(output, devnull))
-            else:
-                return progress_stream.get_digest_from_pull(
-                    stream_output(output, sys.stdout))
-        except (StreamOutputError, NotFound) as e:
-            if not ignore_pull_failures:
-                raise
-            else:
-                log.error(six.text_type(e))
+
+        event_stream = self._do_pull(repo, kwargs, silent, ignore_pull_failures)
+        if stream:
+            return event_stream
+        return progress_stream.get_digest_from_pull(event_stream)

     def push(self, ignore_push_failures=False):
         if 'image' not in self.options or 'build' not in self.options:
@@ -1360,11 +1378,13 @@ class ServiceNetworkMode(object):
 # Names


-def build_container_name(project, service, number, one_off=False):
+def build_container_name(project, service, number, slug, one_off=False):
     bits = [project.lstrip('-_'), service]
     if one_off:
         bits.append('run')
-    return '_'.join(bits + [str(number)])
+    return '_'.join(
+        bits + ([str(number), truncate_id(slug)] if slug else [str(number)])
+    )


 # Images
@@ -1545,10 +1565,11 @@ def build_mount(mount_spec):
 # Labels


-def build_container_labels(label_options, service_labels, number, config_hash):
+def build_container_labels(label_options, service_labels, number, config_hash, slug):
     labels = dict(label_options or {})
     labels.update(label.split('=', 1) for label in service_labels)
     labels[LABEL_CONTAINER_NUMBER] = str(number)
+    labels[LABEL_SLUG] = slug
     labels[LABEL_VERSION] = __version__

     if config_hash:
@@ -1637,3 +1658,15 @@ def convert_blkio_config(blkio_config):
         arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
         result[field] = arr
     return result
+
+
+def rewrite_build_path(path):
+    # python2 os.stat() doesn't support unicode on some UNIX, so we
+    # encode it to a bytestring to be safe
+    if not six.PY3 and not IS_WINDOWS_PLATFORM:
+        path = path.encode('utf8')
+
+    if IS_WINDOWS_PLATFORM and not path.startswith(WINDOWS_LONGPATH_PREFIX):
+        path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path)
+
+    return path
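The new `rewrite_build_path` is what resolves the changelog's 260-character Windows path bug: prefixing the build context with `\\?\` opts into Windows extended-length paths. A hedged sketch of the same idea — `ntpath` stands in for the `os.path` that the real function uses when `IS_WINDOWS_PLATFORM` is true, so this runs on any platform:

    import ntpath

    WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

    def rewrite_build_path_sketch(path, on_windows=True):
        # Prepend the extended-length prefix (once) so context paths over
        # 260 characters no longer make `docker-compose build` fail.
        if on_windows and not path.startswith(WINDOWS_LONGPATH_PREFIX):
            path = WINDOWS_LONGPATH_PREFIX + ntpath.normpath(path)
        return path

    assert rewrite_build_path_sketch('C:\\work\\app') == '\\\\?\\C:\\work\\app'
    assert rewrite_build_path_sketch('C:\\work\\app', on_windows=False) == 'C:\\work\\app'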
compose/utils.py

@@ -7,6 +7,7 @@ import json
 import json.decoder
 import logging
 import ntpath
+import random

 import six
 from docker.errors import DockerException
@@ -151,3 +152,21 @@ def unquote_path(s):
     if s[0] == '"' and s[-1] == '"':
         return s[1:-1]
     return s
+
+
+def generate_random_id():
+    while True:
+        val = hex(random.getrandbits(32 * 8))[2:-1]
+        try:
+            int(truncate_id(val))
+            continue
+        except ValueError:
+            return val
+
+
+def truncate_id(value):
+    if ':' in value:
+        value = value[value.index(':') + 1:]
+    if len(value) > 12:
+        return value[:12]
+    return value
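The retry loop in `generate_random_id` rejects any candidate whose 12-character truncation parses as a plain integer, so a truncated slug in a container name can never be mistaken for a container number. One portability note, offered tentatively: `hex(...)[2:-1]` strips both the `0x` prefix and Python 2's trailing `L` on longs, but on Python 3 (no `L` suffix) it also drops the final hex digit; the sketch below formats with `'%x'` instead:

    import random

    def truncate_id(value):
        if ':' in value:
            value = value[value.index(':') + 1:]
        return value[:12] if len(value) > 12 else value

    def generate_random_id():
        # Draw 256 random bits; retry while the 12-character truncation is
        # all digits, so truncated slugs never look like container numbers.
        while True:
            val = '%x' % random.getrandbits(32 * 8)
            try:
                int(truncate_id(val))
            except ValueError:
                return val

    slug = generate_random_id()
    assert any(c.isalpha() for c in truncate_id(slug))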
contrib/completion/bash/docker-compose

@@ -136,7 +136,7 @@ _docker_compose_bundle() {


 _docker_compose_config() {
-    COMPREPLY=( $( compgen -W "--help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
+    COMPREPLY=( $( compgen -W "--help --quiet -q --resolve-image-digests --services --volumes --hash" -- "$cur" ) )
 }


contrib/completion/zsh/_docker-compose (normal file → executable file)

@@ -23,7 +23,7 @@ __docker-compose_all_services_in_compose_file() {
     local already_selected
     local -a services
     already_selected=$(echo $words | tr " " "|")
-    __docker-compose_q config --services \
+    __docker-compose_q ps --services "$@" \
         | grep -Ev "^(${already_selected})$"
 }

@@ -31,125 +31,42 @@ __docker-compose_all_services_in_compose_file() {
 __docker-compose_services_all() {
     [[ $PREFIX = -* ]] && return 1
     integer ret=1
-    services=$(__docker-compose_all_services_in_compose_file)
+    services=$(__docker-compose_all_services_in_compose_file "$@")
     _alternative "args:services:($services)" && ret=0

     return ret
 }

-# All services that have an entry with the given key in their docker-compose.yml section
-__docker-compose_services_with_key() {
-    local already_selected
-    local -a buildable
-    already_selected=$(echo $words | tr " " "|")
-    # flatten sections to one line, then filter lines containing the key and return section name.
-    __docker-compose_q config \
-        | sed -n -e '/^services:/,/^[^ ]/p' \
-        | sed -n 's/^  //p' \
-        | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
-        | grep " \+$1:" \
-        | cut -d: -f1 \
-        | grep -Ev "^(${already_selected})$"
-}
-
 # All services that are defined by a Dockerfile reference
 __docker-compose_services_from_build() {
     [[ $PREFIX = -* ]] && return 1
-    integer ret=1
-    buildable=$(__docker-compose_services_with_key build)
-    _alternative "args:buildable services:($buildable)" && ret=0
-
-    return ret
+    __docker-compose_services_all --filter source=build
 }

 # All services that are defined by an image
 __docker-compose_services_from_image() {
     [[ $PREFIX = -* ]] && return 1
-    integer ret=1
-    pullable=$(__docker-compose_services_with_key image)
-    _alternative "args:pullable services:($pullable)" && ret=0
-
-    return ret
-}
-
-__docker-compose_get_services() {
-    [[ $PREFIX = -* ]] && return 1
-    integer ret=1
-    local kind
-    declare -a running paused stopped lines args services
-
-    docker_status=$(docker ps > /dev/null 2>&1)
-    if [ $? -ne 0 ]; then
-        _message "Error! Docker is not running."
-        return 1
-    fi
-
-    kind=$1
-    shift
-    [[ $kind =~ (stopped|all) ]] && args=($args -a)
-
-    lines=(${(f)"$(_call_program commands docker $docker_options ps --format 'table' $args)"})
-    services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
-
-    # Parse header line to find columns
-    local i=1 j=1 k header=${lines[1]}
+    __docker-compose_services_all --filter source=image
+}
|
|
||||||
declare -A begin end
|
|
||||||
while (( j < ${#header} - 1 )); do
|
|
||||||
i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
|
|
||||||
j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
|
|
||||||
k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
|
|
||||||
begin[${header[$i,$((j-1))]}]=$i
|
|
||||||
end[${header[$i,$((j-1))]}]=$k
|
|
||||||
done
|
|
||||||
lines=(${lines[2,-1]})
|
|
||||||
|
|
||||||
# Container ID
|
|
||||||
local line s name
|
|
||||||
local -a names
|
|
||||||
for line in $lines; do
|
|
||||||
if [[ ${services[@]} == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then
|
|
||||||
names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}})
|
|
||||||
for name in $names; do
|
|
||||||
s="${${name%_*}#*_}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
|
|
||||||
s="$s, ${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"
|
|
||||||
s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}"
|
|
||||||
if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
|
|
||||||
stopped=($stopped $s)
|
|
||||||
else
|
|
||||||
if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = *\(Paused\)* ]]; then
|
|
||||||
paused=($paused $s)
|
|
||||||
fi
|
|
||||||
running=($running $s)
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
[[ $kind =~ (running|all) ]] && _describe -t services-running "running services" running "$@" && ret=0
|
|
||||||
[[ $kind =~ (paused|all) ]] && _describe -t services-paused "paused services" paused "$@" && ret=0
|
|
||||||
[[ $kind =~ (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped "$@" && ret=0
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
}
|
||||||
|
|
||||||
__docker-compose_pausedservices() {
|
__docker-compose_pausedservices() {
|
||||||
[[ $PREFIX = -* ]] && return 1
|
[[ $PREFIX = -* ]] && return 1
|
||||||
__docker-compose_get_services paused "$@"
|
__docker-compose_services_all --filter status=paused
|
||||||
}
|
}
|
||||||
|
|
||||||
__docker-compose_stoppedservices() {
|
__docker-compose_stoppedservices() {
|
||||||
[[ $PREFIX = -* ]] && return 1
|
[[ $PREFIX = -* ]] && return 1
|
||||||
__docker-compose_get_services stopped "$@"
|
__docker-compose_services_all --filter status=stopped
|
||||||
}
|
}
|
||||||
|
|
||||||
__docker-compose_runningservices() {
|
__docker-compose_runningservices() {
|
||||||
[[ $PREFIX = -* ]] && return 1
|
[[ $PREFIX = -* ]] && return 1
|
||||||
__docker-compose_get_services running "$@"
|
__docker-compose_services_all --filter status=running
|
||||||
}
|
}
|
||||||
|
|
||||||
__docker-compose_services() {
|
__docker-compose_services() {
|
||||||
[[ $PREFIX = -* ]] && return 1
|
[[ $PREFIX = -* ]] && return 1
|
||||||
__docker-compose_get_services all "$@"
|
__docker-compose_services_all
|
||||||
}
|
}
|
||||||
|
|
||||||
__docker-compose_caching_policy() {
|
__docker-compose_caching_policy() {
|
||||||
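Note: the rewrite above drops all of the fragile `docker ps` table parsing and instead delegates filtering to compose itself. The shell equivalent of what the helpers now run is `docker-compose ps --services --filter status=running`; a hedged Python sketch of the same delegation (assumes `docker-compose` is on PATH and a compose project is in the working directory):

    # Sketch: listing services via compose's own --filter support.
    import subprocess

    def compose_services(**filters):
        cmd = ['docker-compose', 'ps', '--services']
        for key, value in filters.items():
            cmd += ['--filter', '{}={}'.format(key, value)]
        out = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return out.stdout.split()

    # e.g. compose_services(status='running') or compose_services(source='build')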
@@ -196,9 +113,10 @@ __docker-compose_subcommand() {
            $opts_help \
            "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
            '--force-rm[Always remove intermediate containers.]' \
-            '--memory[Memory limit for the build container.]' \
+            '(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \
            '--no-cache[Do not use cache when building the image.]' \
            '--pull[Always attempt to pull a newer version of the image.]' \
+            '--compress[Compress the build context using gzip.]' \
            '*:services:__docker-compose_services_from_build' && ret=0
        ;;
    (bundle)
@@ -213,7 +131,8 @@ __docker-compose_subcommand() {
            '(--quiet -q)'{--quiet,-q}"[Only validate the configuration, don't print anything.]" \
            '--resolve-image-digests[Pin image tags to digests.]' \
            '--services[Print the service names, one per line.]' \
-            '--volumes[Print the volume names, one per line.]' && ret=0
+            '--volumes[Print the volume names, one per line.]' \
+            '--hash[Print the service config hash, one per line. Set "service1,service2" for a list of specified services.]' && ret=0
        ;;
    (create)
        _arguments \
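Note: `config --hash` prints a per-service configuration hash. For intuition, here is a minimal sketch of one way such a hash can be derived (a stable JSON dump fed to SHA-256). This is illustrative only and is not claimed to be compose's exact implementation:

    # Sketch: a stable per-service config hash (illustrative).
    import hashlib
    import json

    def config_hash(service_config):
        dump = json.dumps(service_config, sort_keys=True, separators=(',', ':'))
        return hashlib.sha256(dump.encode('utf8')).hexdigest()

    print(config_hash({'image': 'busybox', 'command': 'top'}))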
@@ -222,11 +141,12 @@ __docker-compose_subcommand() {
            $opts_no_recreate \
            $opts_no_build \
            "(--no-build)--build[Build images before creating containers.]" \
-            '*:services:__docker-compose_services_all' && ret=0
+            '*:services:__docker-compose_services' && ret=0
        ;;
    (down)
        _arguments \
            $opts_help \
+            $opts_timeout \
            "--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \
            '(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \
            $opts_remove_orphans && ret=0
@@ -235,16 +155,18 @@ __docker-compose_subcommand() {
        _arguments \
            $opts_help \
            '--json[Output events as a stream of json objects]' \
-            '*:services:__docker-compose_services_all' && ret=0
+            '*:services:__docker-compose_services' && ret=0
        ;;
    (exec)
        _arguments \
            $opts_help \
            '-d[Detached mode: Run command in the background.]' \
            '--privileged[Give extended privileges to the process.]' \
            '(-u --user)'{-u,--user=}'[Run the command as this user.]:username:_users' \
            '-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
            '--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
+            '*'{-e,--env}'[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
+            '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
            '(-):running services:__docker-compose_runningservices' \
            '(-):command: _command_names -e' \
            '*::arguments: _normal' && ret=0
@@ -252,12 +174,12 @@ __docker-compose_subcommand() {
    (help)
        _arguments ':subcommand:__docker-compose_commands' && ret=0
        ;;
    (images)
        _arguments \
            $opts_help \
            '-q[Only display IDs]' \
-            '*:services:__docker-compose_services_all' && ret=0
+            '*:services:__docker-compose_services' && ret=0
        ;;
    (kill)
        _arguments \
            $opts_help \
@@ -271,7 +193,7 @@ __docker-compose_subcommand() {
            $opts_no_color \
            '--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \
            '(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
-            '*:services:__docker-compose_services_all' && ret=0
+            '*:services:__docker-compose_services' && ret=0
        ;;
    (pause)
        _arguments \
@@ -290,12 +212,16 @@ __docker-compose_subcommand() {
        _arguments \
            $opts_help \
            '-q[Only display IDs]' \
-            '*:services:__docker-compose_services_all' && ret=0
+            '--filter KEY=VAL[Filter services by a property]:<filtername>=<value>:' \
+            '*:services:__docker-compose_services' && ret=0
        ;;
    (pull)
        _arguments \
            $opts_help \
            '--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \
+            '--no-parallel[Disable parallel pulling]' \
+            '(-q --quiet)'{-q,--quiet}'[Pull without printing progress information]' \
+            '--include-deps[Also pull services declared as dependencies]' \
            '*:services:__docker-compose_services_from_image' && ret=0
        ;;
    (push)
@@ -317,6 +243,7 @@ __docker-compose_subcommand() {
            $opts_no_deps \
            '-d[Detached mode: Run container in the background, print new container name.]' \
            '*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
+            '*'{-l,--label}'[KEY=VAL Add or override a label (can be used multiple times)]:label KEY=VAL: ' \
            '--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
            '--name=[Assign a name to the container]:name: ' \
            '(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
@@ -326,6 +253,7 @@ __docker-compose_subcommand() {
            '(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
            '(-v --volume)*'{-v,--volume=}'[Bind mount a volume]:volume: ' \
            '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
+            "--use-aliases[Use the services network aliases in the network(s) the container connects to]" \
            '(-):services:__docker-compose_services' \
            '(-):command: _command_names -e' \
            '*::arguments: _normal' && ret=0
@@ -369,8 +297,10 @@ __docker-compose_subcommand() {
            "(--no-build)--build[Build images before starting containers.]" \
            "(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
            '(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
+            '--scale[SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.]:service scale SERVICE=NUM: ' \
+            '--exit-code-from=[Return the exit code of the selected service container. Implies --abort-on-container-exit]:service:__docker-compose_services' \
            $opts_remove_orphans \
-            '*:services:__docker-compose_services_all' && ret=0
+            '*:services:__docker-compose_services' && ret=0
        ;;
    (version)
        _arguments \
@@ -409,8 +339,11 @@ _docker-compose() {
        '(- :)'{-h,--help}'[Get help]' \
        '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
        '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-        '--verbose[Show more output]' \
+        "--compatibility[If set, Compose will attempt to convert deploy keys in v3 files to their non-Swarm equivalent]" \
        '(- :)'{-v,--version}'[Print version and exit]' \
+        '--verbose[Show more output]' \
+        '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \
+        '--no-ansi[Do not print ANSI control characters]' \
        '(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \
        '--tls[Use TLS; implied by --tlsverify]' \
        '--tlscacert=[Trust certs signed only by this CA]:ca path:' \
@@ -1,5 +1,5 @@
 coverage==4.4.2
 flake8==3.5.0
 mock>=1.0.1
-pytest==2.9.2
+pytest==3.6.3
 pytest-cov==2.5.1

@@ -2,7 +2,7 @@ backports.ssl-match-hostname==3.5.0.1; python_version < '3'
 cached-property==1.3.0
 certifi==2017.4.17
 chardet==3.0.4
-docker==3.4.1
+docker==3.5.0
 docker-pycreds==0.3.0
 dockerpty==0.4.1
 docopt==0.6.2
@@ -13,11 +13,11 @@ idna==2.5
 ipaddress==1.0.18
 jsonschema==2.6.0
 pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
-pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
+pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
 PySocks==1.6.7
 PyYAML==3.12
-requests==2.18.4
+requests==2.19.1
 six==1.10.0
 texttable==0.9.1
-urllib3==1.21.1
+urllib3==1.21.1; python_version == '3.3'
 websocket-client==0.32.0
@@ -1,11 +1,11 @@
 #!/bin/bash
 set -ex

-PATH="/usr/local/bin:$PATH"
+TOOLCHAIN_PATH="$(realpath $(dirname $0)/../../build/toolchain)"

 rm -rf venv

-virtualenv -p /usr/local/bin/python3 venv
+virtualenv -p ${TOOLCHAIN_PATH}/bin/python3 venv
 venv/bin/pip install -r requirements.txt
 venv/bin/pip install -r requirements-build.txt
 venv/bin/pip install --no-deps .

@@ -44,7 +44,7 @@ virtualenv .\venv
 # pip and pyinstaller generate lots of warnings, so we need to ignore them
 $ErrorActionPreference = "Continue"

-.\venv\Scripts\pip install pypiwin32==220
+.\venv\Scripts\pip install pypiwin32==223
 .\venv\Scripts\pip install -r requirements.txt
 .\venv\Scripts\pip install --no-deps .
 .\venv\Scripts\pip install -r requirements-build.txt
@@ -20,6 +20,12 @@ following repositories:
 - docker/compose
 - docker/compose-tests

+### A local Python environment
+
+While most of the release script is running inside a Docker container,
+fetching local Docker credentials depends on the `docker` Python package
+being available locally.
+
 ### A Github account and Github API token

 Your Github account needs to have write access on the `docker/compose` repo.

@@ -60,8 +60,11 @@ def create_bump_commit(repository, release_branch, bintray_user, bintray_org):
     repository.push_branch_to_remote(release_branch)

     bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], bintray_user)
-    print('Creating data repository {} on bintray'.format(release_branch.name))
-    bintray_api.create_repository(bintray_org, release_branch.name, 'generic')
+    if not bintray_api.repository_exists(bintray_org, release_branch.name):
+        print('Creating data repository {} on bintray'.format(release_branch.name))
+        bintray_api.create_repository(bintray_org, release_branch.name, 'generic')
+    else:
+        print('Bintray repository {} already exists. Skipping'.format(release_branch.name))


 def monitor_pr_status(pr_data):
@@ -74,19 +77,24 @@ def monitor_pr_status(pr_data):
             'pending': 0,
             'success': 0,
             'failure': 0,
+            'error': 0,
         }
         for detail in status.statuses:
             if detail.context == 'dco-signed':
                 # dco-signed check breaks on merge remote-tracking ; ignore it
                 continue
-            summary[detail.state] += 1
-        print('{pending} pending, {success} successes, {failure} failures'.format(**summary))
-        if summary['pending'] == 0 and summary['failure'] == 0 and summary['success'] > 0:
+            if detail.state in summary:
+                summary[detail.state] += 1
+        print(
+            '{pending} pending, {success} successes, {failure} failures, '
+            '{error} errors'.format(**summary)
+        )
+        if summary['failure'] > 0 or summary['error'] > 0:
+            raise ScriptError('CI failures detected!')
+        elif summary['pending'] == 0 and summary['success'] > 0:
             # This check assumes at least 1 non-DCO CI check to avoid race conditions.
             # If testing on a repo without CI, use --skip-ci-check to avoid looping eternally
             return True
-        elif summary['failure'] > 0:
-            raise ScriptError('CI failures detected!')
         time.sleep(30)
     elif status.state == 'success':
         print('{} successes: all clear!'.format(status.total_count))
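Note: the rewrite counts only the four known states (so unexpected states no longer raise KeyError) and fails fast on any failure or error before checking for an all-green result. A small standalone sketch of the new tallying rules, using stand-in status strings:

    # Sketch of the new CI tallying rules (stand-in data, not the GitHub API).
    def summarize(states):
        summary = {'pending': 0, 'success': 0, 'failure': 0, 'error': 0}
        for state in states:
            if state in summary:        # unknown states are simply ignored
                summary[state] += 1
        if summary['failure'] > 0 or summary['error'] > 0:
            return 'fail fast'
        if summary['pending'] == 0 and summary['success'] > 0:
            return 'all green'
        return 'keep polling'

    print(summarize(['success', 'error']))    # fail fast
    print(summarize(['success', 'pending']))  # keep polling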
@@ -94,12 +102,14 @@ def monitor_pr_status(pr_data):


 def check_pr_mergeable(pr_data):
-    if not pr_data.mergeable:
+    if pr_data.mergeable is False:
+        # mergeable can also be null, in which case the warning would be a false positive.
         print(
             'WARNING!! PR #{} can not currently be merged. You will need to '
             'resolve the conflicts manually before finalizing the release.'.format(pr_data.number)
         )
-    return pr_data.mergeable
+
+    return pr_data.mergeable is True


 def create_release_draft(repository, version, pr_data, files):
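Note: GitHub reports `mergeable` as a tri-state value: true, false, or null while the merge check is still being computed. The explicit `is False` / `is True` tests keep null from either triggering the warning or counting as mergeable. A two-line demonstration:

    # Sketch: why the tri-state matters. None means "not computed yet".
    def should_warn(mergeable):
        return mergeable is False       # None must not trigger the warning

    for value in (True, False, None):
        print(value, '->', 'warn' if should_warn(value) else 'no warning')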
@@ -15,10 +15,19 @@ if test -z $BINTRAY_TOKEN; then
    exit 1
 fi

-docker run -e GITHUB_TOKEN=$GITHUB_TOKEN -e BINTRAY_TOKEN=$BINTRAY_TOKEN -e SSH_AUTH_SOCK=$SSH_AUTH_SOCK -it \
+if test -z $(python -c "import docker; print(docker.version)" 2>/dev/null); then
+    echo "This script requires the 'docker' Python package to be installed locally"
+    exit 1
+fi
+
+hub_credentials=$(python -c "from docker import auth; cfg = auth.load_config(); print(auth.encode_header(auth.resolve_authconfig(cfg, 'docker.io')).decode('ascii'))")
+
+docker run -it \
+    -e GITHUB_TOKEN=$GITHUB_TOKEN \
+    -e BINTRAY_TOKEN=$BINTRAY_TOKEN \
+    -e SSH_AUTH_SOCK=$SSH_AUTH_SOCK \
+    -e HUB_CREDENTIALS=$hub_credentials \
    --mount type=bind,source=$(pwd),target=/src \
-    --mount type=bind,source=$(pwd)/.git,target=/src/.git \
-    --mount type=bind,source=$HOME/.docker,target=/root/.docker \
    --mount type=bind,source=$HOME/.gitconfig,target=/root/.gitconfig \
    --mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock \
    --mount type=bind,source=$HOME/.ssh,target=/root/.ssh \
@@ -15,7 +15,7 @@ class BintrayAPI(requests.Session):
         self.base_url = 'https://api.bintray.com/'

     def create_repository(self, subject, repo_name, repo_type='generic'):
-        url = '{base}/repos/{subject}/{repo_name}'.format(
+        url = '{base}repos/{subject}/{repo_name}'.format(
             base=self.base_url, subject=subject, repo_name=repo_name,
         )
         data = {
@@ -27,10 +27,20 @@ class BintrayAPI(requests.Session):
         }
         return self.post_json(url, data)

-    def delete_repository(self, subject, repo_name):
+    def repository_exists(self, subject, repo_name):
         url = '{base}/repos/{subject}/{repo_name}'.format(
             base=self.base_url, subject=subject, repo_name=repo_name,
         )
+        result = self.get(url)
+        if result.status_code == 404:
+            return False
+        result.raise_for_status()
+        return True
+
+    def delete_repository(self, subject, repo_name):
+        url = '{base}repos/{subject}/{repo_name}'.format(
+            base=self.base_url, subject=subject, repo_name=repo_name,
+        )
         return self.delete(url)

     def post_json(self, url, data, **kwargs):
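Note: `repository_exists` implements the common 404-probe pattern that makes the create call idempotent: a GET that 404s means "safe to create", any other error surfaces via `raise_for_status()`. A hedged usage sketch with plain requests (token handling omitted; names are illustrative):

    # Sketch: the 404-probe existence check, outside the BintrayAPI class.
    import requests

    def repository_exists(session, base_url, subject, repo_name):
        result = session.get('{}repos/{}/{}'.format(base_url, subject, repo_name))
        if result.status_code == 404:
            return False
        result.raise_for_status()       # other failures should surface loudly
        return True

    # usage sketch:
    # repository_exists(requests.Session(), 'https://api.bintray.com/', 'docker', 'compose-1.23')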
@@ -2,6 +2,8 @@ from __future__ import absolute_import
 from __future__ import print_function
 from __future__ import unicode_literals

+import base64
+import json
 import os
 import shutil

@@ -15,6 +17,12 @@ class ImageManager(object):
     def __init__(self, version):
         self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
         self.version = version
+        if 'HUB_CREDENTIALS' in os.environ:
+            print('HUB_CREDENTIALS found in environment, issuing login')
+            credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
+            self.docker_client.login(
+                username=credentials['Username'], password=credentials['Password']
+            )

     def build_images(self, repository, files):
         print("Building release images...")
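Note: this pairs with the `HUB_CREDENTIALS` export added to release.sh above: the env var carries urlsafe-base64 JSON with `Username`/`Password` keys. A round-trip sketch with hypothetical values, showing the encoding the decoder expects:

    # Sketch: the HUB_CREDENTIALS round trip (hypothetical credentials).
    import base64
    import json

    encoded = base64.urlsafe_b64encode(
        json.dumps({'Username': 'alice', 'Password': 's3cret'}).encode('ascii')
    )
    credentials = json.loads(base64.urlsafe_b64decode(encoded))
    assert credentials['Username'] == 'alice'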
@@ -15,7 +15,7 @@

 set -e

-VERSION="1.22.0"
+VERSION="1.23.0-rc1"
 IMAGE="docker/compose:$VERSION"


@@ -47,11 +47,17 @@ if [ -n "$HOME" ]; then
 fi

 # Only allocate tty if we detect one
-if [ -t 1 ]; then
-    DOCKER_RUN_OPTIONS="-t"
-fi
 if [ -t 0 ]; then
+    if [ -t 1 ]; then
+        DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
+    fi
+else
     DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"
 fi

+# Handle userns security
+if [ ! -z "$(docker info 2>/dev/null | grep userns)" ]; then
+    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS --userns=host"
+fi
+
 exec docker run --rm $DOCKER_RUN_OPTIONS $DOCKER_ADDR $COMPOSE_OPTIONS $VOLUMES -w "$(pwd)" $IMAGE "$@"
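Note: the reworked TTY logic only requests a pseudo-TTY (`-t`) when both stdin and stdout are terminals, and only keeps stdin open (`-i`) when input is piped. A Python rendering of the same decision tree, for clarity:

    # Sketch of the run.sh TTY decision in Python terms.
    import sys

    def docker_run_options():
        opts = []
        if sys.stdin.isatty():
            if sys.stdout.isatty():
                opts.append('-t')   # allocate a pseudo-TTY only for real terminals
        else:
            opts.append('-i')       # piped stdin: keep it open instead
        return opts

    print(docker_run_options())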
script/setup/osx (123 lines changed)
@@ -1,43 +1,104 @@
-#!/bin/bash
+#!/usr/bin/env bash

 set -ex

-python_version() {
-  python -V 2>&1
-}
-
-python3_version() {
-  python3 -V 2>&1
-}
-
-openssl_version() {
-  python -c "import ssl; print ssl.OPENSSL_VERSION"
-}
-
-desired_python3_version="3.6.4"
-desired_python3_brew_version="3.6.4_2"
-python3_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e69a9a592232fa5a82741f6acecffc2f1d198d/Formula/python3.rb"
-
-PATH="/usr/local/bin:$PATH"
-
-if !(which brew); then
+. $(dirname $0)/osx_helpers.sh
+
+DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET:-"$(macos_version)"}
+SDK_FETCH=
+if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
+  SDK_FETCH=1
+  # SDK URL from https://github.com/docker/golang-cross/blob/master/osx-cross.sh
+  SDK_URL=https://s3.dockerproject.org/darwin/v2/MacOSX${DEPLOYMENT_TARGET}.sdk.tar.xz
+  SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
+fi
+
+OPENSSL_VERSION=1.1.0h
+OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
+OPENSSL_SHA1=0fc39f6aa91b6e7f4d05018f7c5e991e1d2491fd
+
+PYTHON_VERSION=3.6.6
+PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
+PYTHON_SHA1=ae1fc9ddd29ad8c1d5f7b0d799ff0787efeb9652
+
+#
+# Install prerequisites.
+#
+if ! [ -x "$(command -v brew)" ]; then
   ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
 fi
-
-brew update > /dev/null
-
-if !(python3_version | grep "$desired_python3_version"); then
-  if brew list | grep python3; then
-    brew unlink python3
-  fi
-
-  brew install "$python3_formula"
-  brew switch python3 "$desired_python3_brew_version"
+if ! [ -x "$(command -v grealpath)" ]; then
+  brew update > /dev/null
+  brew install coreutils
 fi
-
-echo "*** Using $(python3_version) ; $(python_version)"
-echo "*** Using $(openssl_version)"
-
-if !(which virtualenv); then
+if ! [ -x "$(command -v python3)" ]; then
+  brew update > /dev/null
+  brew install python3
+fi
+if ! [ -x "$(command -v virtualenv)" ]; then
   pip install virtualenv
 fi
+
+#
+# Create toolchain directory.
+#
+BUILD_PATH="$(grealpath $(dirname $0)/../../build)"
+mkdir -p ${BUILD_PATH}
+TOOLCHAIN_PATH="${BUILD_PATH}/toolchain"
+mkdir -p ${TOOLCHAIN_PATH}
+
+#
+# Set macOS SDK.
+#
+if [ ${SDK_FETCH} ]; then
+  SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk
+  fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1}
+else
+  SDK_PATH="$(xcode-select --print-path)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX${DEPLOYMENT_TARGET}.sdk"
+fi
+
+#
+# Build OpenSSL.
+#
+OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION}
+if ! [ -f ${TOOLCHAIN_PATH}/bin/openssl ]; then
+  rm -rf ${OPENSSL_SRC_PATH}
+  fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1}
+  (
+    cd ${OPENSSL_SRC_PATH}
+    export MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET}
+    export SDKROOT=${SDK_PATH}
+    ./Configure darwin64-x86_64-cc --prefix=${TOOLCHAIN_PATH}
+    make install_sw install_dev
+  )
+fi
+
+#
+# Build Python.
+#
+PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION}
+if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
+  rm -rf ${PYTHON_SRC_PATH}
+  fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}
+  (
+    cd ${PYTHON_SRC_PATH}
+    ./configure --prefix=${TOOLCHAIN_PATH} \
+      --enable-ipv6 --without-ensurepip --with-dtrace --without-gcc \
+      --datarootdir=${TOOLCHAIN_PATH}/share \
+      --datadir=${TOOLCHAIN_PATH}/share \
+      --enable-framework=${TOOLCHAIN_PATH}/Frameworks \
+      MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \
+      CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \
+      CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}include" \
+      LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib"
+    make -j 4
+    make install PYTHONAPPSDIR=${TOOLCHAIN_PATH}
+    make frameworkinstallextras PYTHONAPPSDIR=${TOOLCHAIN_PATH}/share
+  )
+fi
+
+echo ""
+echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}"
+echo "*** Using SDK ${SDK_PATH}"
+echo "*** Using $(python3_version ${TOOLCHAIN_PATH})"
+echo "*** Using $(openssl_version ${TOOLCHAIN_PATH})"
script/setup/osx_helpers.sh (new file, 41 lines)
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Check file's ($1) SHA1 ($2).
+check_sha1() {
+  echo -n "$2 *$1" | shasum -c -
+}
+
+# Download URL ($1) to path ($2).
+download() {
+  curl -L $1 -o $2
+}
+
+# Extract tarball ($1) in folder ($2).
+extract() {
+  tar xf $1 -C $2
+}
+
+# Download URL ($1), check SHA1 ($3), and extract utility ($2).
+fetch_tarball() {
+  url=$1
+  tarball=$2.tarball
+  sha1=$3
+  download $url $tarball
+  check_sha1 $tarball $sha1
+  extract $tarball $(dirname $tarball)
+}
+
+# Version of Python at toolchain path ($1).
+python3_version() {
+  $1/bin/python3 -V 2>&1
+}
+
+# Version of OpenSSL used by toolchain ($1) Python.
+openssl_version() {
+  $1/bin/python3 -c "import ssl; print(ssl.OPENSSL_VERSION)"
+}
+
+# System macOS version.
+macos_version() {
+  sw_vers -productVersion | cut -f1,2 -d'.'
+}
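Note: `fetch_tarball` is the download/verify/extract primitive the new toolchain build leans on. A hedged Python equivalent of the same flow, for readers less fluent in shell (URL and SHA1 arguments are whatever the caller supplies; this is a sketch, not the build script):

    # Sketch: fetch_tarball's download / SHA1-verify / extract flow.
    import hashlib
    import tarfile
    import urllib.request

    def fetch_tarball(url, dest_dir, sha1):
        tarball, _ = urllib.request.urlretrieve(url)
        digest = hashlib.sha1(open(tarball, 'rb').read()).hexdigest()
        if digest != sha1:
            raise ValueError('SHA1 mismatch: {} != {}'.format(digest, sha1))
        tarfile.open(tarball).extractall(dest_dir)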
@@ -5,7 +5,7 @@ set -ex

 TAG="docker-compose:$(git rev-parse --short HEAD)"

-# By default use the Dockerfile, but can be overriden to use an alternative file
+# By default use the Dockerfile, but can be overridden to use an alternative file
 # e.g DOCKERFILE=Dockerfile.armhf script/test/default
 DOCKERFILE="${DOCKERFILE:-Dockerfile}"
@@ -37,22 +37,21 @@ import requests
 GITHUB_API = 'https://api.github.com/repos'


-class Version(namedtuple('_Version', 'major minor patch rc edition')):
+class Version(namedtuple('_Version', 'major minor patch stage edition')):

     @classmethod
     def parse(cls, version):
         edition = None
         version = version.lstrip('v')
-        version, _, rc = version.partition('-')
-        if rc:
-            if 'rc' not in rc:
-                edition = rc
-                rc = None
-            elif '-' in rc:
-                edition, rc = rc.split('-')
+        version, _, stage = version.partition('-')
+        if stage:
+            if not any(marker in stage for marker in ['rc', 'tp', 'beta']):
+                edition = stage
+                stage = None
+            elif '-' in stage:
+                edition, stage = stage.split('-')

         major, minor, patch = version.split('.', 3)
-        return cls(major, minor, patch, rc, edition)
+        return cls(major, minor, patch, stage, edition)

     @property
     def major_minor(self):
@@ -64,13 +63,13 @@ class Version(namedtuple('_Version', 'major minor patch rc edition')):
         correctly with the default comparator.
         """
         # rc releases should appear before official releases
-        rc = (0, self.rc) if self.rc else (1, )
-        return (int(self.major), int(self.minor), int(self.patch)) + rc
+        stage = (0, self.stage) if self.stage else (1, )
+        return (int(self.major), int(self.minor), int(self.patch)) + stage

     def __str__(self):
-        rc = '-{}'.format(self.rc) if self.rc else ''
+        stage = '-{}'.format(self.stage) if self.stage else ''
         edition = '-{}'.format(self.edition) if self.edition else ''
-        return '.'.join(map(str, self[:3])) + edition + rc
+        return '.'.join(map(str, self[:3])) + edition + stage


 BLACKLIST = [  # List of versions known to be broken and should not be used
@@ -113,9 +112,9 @@ def get_latest_versions(versions, num=1):


 def get_default(versions):
-    """Return a :class:`Version` for the latest non-rc version."""
+    """Return a :class:`Version` for the latest GA version."""
     for version in versions:
-        if not version.rc:
+        if not version.stage:
             return version


@@ -123,8 +122,12 @@ def get_versions(tags):
     for tag in tags:
         try:
             v = Version.parse(tag['name'])
-            if v not in BLACKLIST:
-                yield v
+            if v in BLACKLIST:
+                continue
+            # FIXME: Temporary. Remove once these versions are built on dockerswarm/dind
+            if v.stage and 'rc' not in v.stage:
+                continue
+            yield v
         except ValueError:
             print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)
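Note: the `rc` field is generalized to `stage` so that tp and beta suffixes are treated as pre-release stages rather than editions. A tiny standalone mirror of the split logic with worked examples (the `split` helper re-implements the branch above purely for demonstration):

    # Sketch: how parse() now divides stage vs edition.
    def split(version):
        version = version.lstrip('v')
        version, _, stage = version.partition('-')
        edition = None
        if stage:
            if not any(marker in stage for marker in ['rc', 'tp', 'beta']):
                edition, stage = stage, None
            elif '-' in stage:
                edition, stage = stage.split('-')
        return version, stage, edition

    for tag in ['1.23.0-rc1', '18.06.1-ce', '17.12.0-ce-rc2', '1.22.0']:
        print(tag, split(tag))
    # 1.23.0-rc1     -> stage='rc1',  edition=None
    # 18.06.1-ce     -> stage=None,   edition='ce'
    # 17.12.0-ce-rc2 -> stage='rc2',  edition='ce'
    # 1.22.0         -> stage=None,   edition=None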
setup.py (5 lines changed)
@@ -33,10 +33,10 @@ install_requires = [
     'cached-property >= 1.2.0, < 2',
     'docopt >= 0.6.1, < 0.7',
     'PyYAML >= 3.10, < 4',
-    'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.19',
+    'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.20',
     'texttable >= 0.9.0, < 0.10',
     'websocket-client >= 0.32.0, < 1.0',
-    'docker >= 3.4.1, < 4.0',
+    'docker >= 3.5.0, < 4.0',
     'dockerpty >= 0.4.1, < 0.5',
     'six >= 1.3.0, < 2',
     'jsonschema >= 2.5.1, < 3',
@@ -100,5 +100,6 @@ setup(
         'Programming Language :: Python :: 3',
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
     ],
 )
@@ -99,7 +99,14 @@ class ContainerStateCondition(object):

     def __call__(self):
         try:
-            container = self.client.inspect_container(self.name)
+            if self.name.endswith('*'):
+                ctnrs = self.client.containers(all=True, filters={'name': self.name[:-1]})
+                if len(ctnrs) > 0:
+                    container = self.client.inspect_container(ctnrs[0]['Id'])
+                else:
+                    return False
+            else:
+                container = self.client.inspect_container(self.name)
             return container['State']['Status'] == self.status
         except errors.APIError:
             return False
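Note: the trailing `*` switches the condition from an exact-name lookup to a name-filter query, which is needed now that container names end in a random slug. Conceptually this is a prefix match (Docker's name filter is in practice a substring/regex match, so this sketch only captures the intent):

    # Sketch: what the wildcard means for test expectations.
    def matches(condition_name, container_name):
        if condition_name.endswith('*'):
            return container_name.startswith(condition_name[:-1])
        return container_name == condition_name

    print(matches('volume_test_run_*', 'volume_test_run_1_0123456789ab'))  # True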
@@ -222,6 +229,16 @@ class CLITestCase(DockerClientTestCase):
         self.base_dir = 'tests/fixtures/v2-full'
         assert self.dispatch(['config', '--quiet']).stdout == ''

+    def test_config_with_hash_option(self):
+        self.base_dir = 'tests/fixtures/v2-full'
+        result = self.dispatch(['config', '--hash=*'])
+        for service in self.project.get_services():
+            assert '{} {}\n'.format(service.name, service.config_hash) in result.stdout
+
+        svc = self.project.get_service('other')
+        result = self.dispatch(['config', '--hash=other'])
+        assert result.stdout == '{} {}\n'.format(svc.name, svc.config_hash)
+
     def test_config_default(self):
         self.base_dir = 'tests/fixtures/v2-full'
         result = self.dispatch(['config'])
@@ -293,6 +310,36 @@ class CLITestCase(DockerClientTestCase):
             }
         }

+    def test_config_with_dot_env(self):
+        self.base_dir = 'tests/fixtures/default-env-file'
+        result = self.dispatch(['config'])
+        json_result = yaml.load(result.stdout)
+        assert json_result == {
+            'services': {
+                'web': {
+                    'command': 'true',
+                    'image': 'alpine:latest',
+                    'ports': ['5643/tcp', '9999/tcp']
+                }
+            },
+            'version': '2.4'
+        }
+
+    def test_config_with_dot_env_and_override_dir(self):
+        self.base_dir = 'tests/fixtures/default-env-file'
+        result = self.dispatch(['--project-directory', 'alt/', 'config'])
+        json_result = yaml.load(result.stdout)
+        assert json_result == {
+            'services': {
+                'web': {
+                    'command': 'echo uwu',
+                    'image': 'alpine:3.4',
+                    'ports': ['3341/tcp', '4449/tcp']
+                }
+            },
+            'version': '2.4'
+        }
+
     def test_config_external_volume_v2(self):
         self.base_dir = 'tests/fixtures/volumes'
         result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
@@ -773,6 +820,13 @@ class CLITestCase(DockerClientTestCase):

         assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr

+    def test_build_parallel(self):
+        self.base_dir = 'tests/fixtures/build-multiple-composefile'
+        result = self.dispatch(['build', '--parallel'])
+        assert 'Successfully tagged build-multiple-composefile_a:latest' in result.stdout
+        assert 'Successfully tagged build-multiple-composefile_b:latest' in result.stdout
+        assert 'Successfully built' in result.stdout
+
     def test_create(self):
         self.dispatch(['create'])
         service = self.project.get_service('simple')
@@ -972,11 +1026,15 @@ class CLITestCase(DockerClientTestCase):
     def test_up_attached(self):
         self.base_dir = 'tests/fixtures/echo-services'
         result = self.dispatch(['up', '--no-color'])
+        simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project
+        another_name = self.project.get_service('another').containers(
+            stopped=True
+        )[0].name_without_project

-        assert 'simple_1 | simple' in result.stdout
-        assert 'another_1 | another' in result.stdout
-        assert 'simple_1 exited with code 0' in result.stdout
-        assert 'another_1 exited with code 0' in result.stdout
+        assert '{} | simple'.format(simple_name) in result.stdout
+        assert '{} | another'.format(another_name) in result.stdout
+        assert '{} exited with code 0'.format(simple_name) in result.stdout
+        assert '{} exited with code 0'.format(another_name) in result.stdout

     @v2_only()
     def test_up(self):
@@ -1680,11 +1738,12 @@ class CLITestCase(DockerClientTestCase):
     def test_run_rm(self):
         self.base_dir = 'tests/fixtures/volume'
         proc = start_process(self.base_dir, ['run', '--rm', 'test'])
+        service = self.project.get_service('test')
         wait_on_condition(ContainerStateCondition(
             self.project.client,
-            'volume_test_run_1',
-            'running'))
-        service = self.project.get_service('test')
+            'volume_test_run_*',
+            'running')
+        )
         containers = service.containers(one_off=OneOffFilter.only)
         assert len(containers) == 1
         mounts = containers[0].get('Mounts')
@@ -2007,39 +2066,39 @@ class CLITestCase(DockerClientTestCase):
         proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
         wait_on_condition(ContainerStateCondition(
             self.project.client,
-            'simple-composefile_simple_run_1',
+            'simple-composefile_simple_run_*',
             'running'))

         os.kill(proc.pid, signal.SIGINT)
         wait_on_condition(ContainerStateCondition(
             self.project.client,
-            'simple-composefile_simple_run_1',
+            'simple-composefile_simple_run_*',
             'exited'))

     def test_run_handles_sigterm(self):
         proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
         wait_on_condition(ContainerStateCondition(
             self.project.client,
-            'simple-composefile_simple_run_1',
+            'simple-composefile_simple_run_*',
             'running'))

         os.kill(proc.pid, signal.SIGTERM)
         wait_on_condition(ContainerStateCondition(
             self.project.client,
-            'simple-composefile_simple_run_1',
+            'simple-composefile_simple_run_*',
             'exited'))

     def test_run_handles_sighup(self):
         proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
         wait_on_condition(ContainerStateCondition(
             self.project.client,
-            'simple-composefile_simple_run_1',
+            'simple-composefile_simple_run_*',
             'running'))

         os.kill(proc.pid, signal.SIGHUP)
         wait_on_condition(ContainerStateCondition(
             self.project.client,
-            'simple-composefile_simple_run_1',
+            'simple-composefile_simple_run_*',
             'exited'))

     @mock.patch.dict(os.environ)
@@ -2239,19 +2298,45 @@ class CLITestCase(DockerClientTestCase):
         proc = start_process(self.base_dir, ['logs', '-f'])

         self.dispatch(['up', '-d', 'another'])
-        wait_on_condition(ContainerStateCondition(
-            self.project.client,
-            'logs-composefile_another_1',
-            'exited'))
+        another_name = self.project.get_service('another').get_container().name_without_project
+        wait_on_condition(
+            ContainerStateCondition(
+                self.project.client,
+                'logs-composefile_another_*',
+                'exited'
+            )
+        )

+        simple_name = self.project.get_service('simple').get_container().name_without_project
         self.dispatch(['kill', 'simple'])

         result = wait_on_process(proc)

         assert 'hello' in result.stdout
         assert 'test' in result.stdout
-        assert 'logs-composefile_another_1 exited with code 0' in result.stdout
-        assert 'logs-composefile_simple_1 exited with code 137' in result.stdout
+        assert '{} exited with code 0'.format(another_name) in result.stdout
+        assert '{} exited with code 137'.format(simple_name) in result.stdout
+
+    def test_logs_follow_logs_from_restarted_containers(self):
+        self.base_dir = 'tests/fixtures/logs-restart-composefile'
+        proc = start_process(self.base_dir, ['up'])
+
+        wait_on_condition(
+            ContainerStateCondition(
+                self.project.client,
+                'logs-restart-composefile_another_*',
+                'exited'
+            )
+        )
+        self.dispatch(['kill', 'simple'])
+
+        result = wait_on_process(proc)
+
+        assert len(re.findall(
+            r'logs-restart-composefile_another_1_[a-f0-9]{12} exited with code 1',
+            result.stdout
+        )) == 3
+        assert result.stdout.count('world') == 3

     def test_logs_default(self):
         self.base_dir = 'tests/fixtures/logs-composefile'
@@ -2283,10 +2368,10 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['up'])

         result = self.dispatch(['logs', '--tail', '2'])
-        assert 'c\n' in result.stdout
-        assert 'd\n' in result.stdout
-        assert 'a\n' not in result.stdout
-        assert 'b\n' not in result.stdout
+        assert 'y\n' in result.stdout
+        assert 'z\n' in result.stdout
+        assert 'w\n' not in result.stdout
+        assert 'x\n' not in result.stdout

     def test_kill(self):
         self.dispatch(['up', '-d'], None)
@@ -2460,9 +2545,9 @@ class CLITestCase(DockerClientTestCase):
             result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
             return result.stdout.rstrip()

-        assert get_port(3000) == containers[0].get_local_port(3000)
-        assert get_port(3000, index=1) == containers[0].get_local_port(3000)
-        assert get_port(3000, index=2) == containers[1].get_local_port(3000)
+        assert get_port(3000) in (containers[0].get_local_port(3000), containers[1].get_local_port(3000))
+        assert get_port(3000, index=containers[0].number) == containers[0].get_local_port(3000)
+        assert get_port(3000, index=containers[1].number) == containers[1].get_local_port(3000)
         assert get_port(3002) == ""

     def test_events_json(self):
@ -2498,7 +2583,7 @@ class CLITestCase(DockerClientTestCase):
 
         container, = self.project.containers()
         expected_template = ' container {} {}'
-        expected_meta_info = ['image=busybox:latest', 'name=simple-composefile_simple_1']
+        expected_meta_info = ['image=busybox:latest', 'name=simple-composefile_simple_']
 
         assert expected_template.format('create', container.id) in lines[0]
         assert expected_template.format('start', container.id) in lines[1]
@ -2580,8 +2665,11 @@ class CLITestCase(DockerClientTestCase):
 
         assert len(containers) == 2
         web = containers[1]
+        db_name = containers[0].name_without_project
 
-        assert set(get_links(web)) == set(['db', 'mydb_1', 'extends_mydb_1'])
+        assert set(get_links(web)) == set(
+            ['db', db_name, 'extends_{}'.format(db_name)]
+        )
 
         expected_env = set([
             "FOO=1",
@ -2614,17 +2702,27 @@ class CLITestCase(DockerClientTestCase):
         self.base_dir = 'tests/fixtures/exit-code-from'
         proc = start_process(
             self.base_dir,
-            ['up', '--abort-on-container-exit', '--exit-code-from', 'another'])
+            ['up', '--abort-on-container-exit', '--exit-code-from', 'another']
+        )
 
         result = wait_on_process(proc, returncode=1)
+        assert re.findall(r'exit-code-from_another_1_[a-f0-9]{12} exited with code 1', result.stdout)
 
-        assert 'exit-code-from_another_1 exited with code 1' in result.stdout
+    def test_exit_code_from_signal_stop(self):
+        self.base_dir = 'tests/fixtures/exit-code-from'
+        proc = start_process(
+            self.base_dir,
+            ['up', '--abort-on-container-exit', '--exit-code-from', 'simple']
+        )
+        result = wait_on_process(proc, returncode=137) # SIGKILL
+        name = self.project.get_service('another').containers(stopped=True)[0].name_without_project
+        assert '{} exited with code 1'.format(name) in result.stdout
 
     def test_images(self):
         self.project.get_service('simple').create_container()
         result = self.dispatch(['images'])
         assert 'busybox' in result.stdout
-        assert 'simple-composefile_simple_1' in result.stdout
+        assert 'simple-composefile_simple_' in result.stdout
 
     def test_images_default_composefile(self):
         self.base_dir = 'tests/fixtures/multiple-composefiles'
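The returncode=137 expectation in the new signal-stop test follows the Unix convention for signal deaths: 128 plus the signal number. A one-line check using only the standard library:

    import signal

    # 137 = 128 + 9: Docker reports a SIGKILL-terminated container this way.
    assert 128 + int(signal.SIGKILL) == 137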
@ -2672,3 +2770,13 @@ class CLITestCase(DockerClientTestCase):
         with pytest.raises(DuplicateOverrideFileFound):
             get_project(self.base_dir, [])
         self.base_dir = None
+
+    def test_images_use_service_tag(self):
+        pull_busybox(self.client)
+        self.base_dir = 'tests/fixtures/images-service-tag'
+        self.dispatch(['up', '-d', '--build'])
+        result = self.dispatch(['images'])
+
+        assert re.search(r'foo1.+test[ \t]+dev', result.stdout) is not None
+        assert re.search(r'foo2.+test[ \t]+prod', result.stdout) is not None
+        assert re.search(r'foo3.+_foo3[ \t]+latest', result.stdout) is not None
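The regexes above scan the tabular output of `docker-compose images`, pinning each service to its expected repository and tag columns. A hypothetical matching row (column spacing illustrative):

    import re

    # Hypothetical output row for service foo1 (container, repository, tag, ...):
    line = 'images-service-tag_foo1_1_0123456789ab   test   dev   ...'
    assert re.search(r'foo1.+test[ \t]+dev', line) is not None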
4 tests/fixtures/build-multiple-composefile/a/Dockerfile vendored Normal file
@ -0,0 +1,4 @@
+FROM busybox:latest
+RUN echo a
+CMD top

4 tests/fixtures/build-multiple-composefile/b/Dockerfile vendored Normal file
@ -0,0 +1,4 @@
+FROM busybox:latest
+RUN echo b
+CMD top

8 tests/fixtures/build-multiple-composefile/docker-compose.yml vendored Normal file
@ -0,0 +1,8 @@
+version: "2"
+
+services:
+  a:
+    build: ./a
+  b:
+    build: ./b

4 tests/fixtures/default-env-file/alt/.env vendored Normal file
@ -0,0 +1,4 @@
+IMAGE=alpine:3.4
+COMMAND=echo uwu
+PORT1=3341
+PORT2=4449
@ -1,4 +1,6 @@
-web:
+version: '2.4'
+services:
+  web:
     image: ${IMAGE}
     command: ${COMMAND}
     ports:
2 tests/fixtures/images-service-tag/Dockerfile vendored Normal file
@ -0,0 +1,2 @@
+FROM busybox:latest
+RUN touch /foo

10 tests/fixtures/images-service-tag/docker-compose.yml vendored Normal file
@ -0,0 +1,10 @@
+version: "2.4"
+services:
+  foo1:
+    build: .
+    image: test:dev
+  foo2:
+    build: .
+    image: test:prod
+  foo3:
+    build: .

7 tests/fixtures/logs-restart-composefile/docker-compose.yml vendored Normal file
@ -0,0 +1,7 @@
+simple:
+  image: busybox:latest
+  command: sh -c "echo hello && tail -f /dev/null"
+another:
+  image: busybox:latest
+  command: sh -c "sleep 0.5 && echo world && /bin/false"
+  restart: "on-failure:2"
@ -1,3 +1,3 @@
 simple:
   image: busybox:latest
-  command: sh -c "echo a && echo b && echo c && echo d"
+  command: sh -c "echo w && echo x && echo y && echo z"
6 tests/fixtures/networks/docker-compose.yml vendored
@ -2,17 +2,17 @@ version: "2"
 
 services:
   web:
-    image: busybox
+    image: alpine:3.7
     command: top
     networks: ["front"]
   app:
-    image: busybox
+    image: alpine:3.7
     command: top
     networks: ["front", "back"]
     links:
       - "db:database"
   db:
-    image: busybox
+    image: alpine:3.7
     command: top
     networks: ["back"]
 
@ -90,7 +90,8 @@ class ProjectTest(DockerClientTestCase):
         project.up()
 
         containers = project.containers(['web'])
-        assert [c.name for c in containers] == ['composetest_web_1']
+        assert len(containers) == 1
+        assert containers[0].name.startswith('composetest_web_')
 
     def test_containers_with_extra_service(self):
         web = self.create_service('web')
@ -431,7 +432,7 @@ class ProjectTest(DockerClientTestCase):
         project.up(strategy=ConvergenceStrategy.always)
         assert len(project.containers()) == 2
 
-        db_container = [c for c in project.containers() if 'db' in c.name][0]
+        db_container = [c for c in project.containers() if c.service == 'db'][0]
         assert db_container.id != old_db_id
         assert db_container.get('Volumes./etc') == db_volume_path
 
@ -451,7 +452,7 @@ class ProjectTest(DockerClientTestCase):
         project.up(strategy=ConvergenceStrategy.always)
         assert len(project.containers()) == 2
 
-        db_container = [c for c in project.containers() if 'db' in c.name][0]
+        db_container = [c for c in project.containers() if c.service == 'db'][0]
         assert db_container.id != old_db_id
         assert db_container.get_mount('/etc')['Source'] == db_volume_path
 
@ -464,14 +465,14 @@ class ProjectTest(DockerClientTestCase):
 
         project.up(['db'])
         assert len(project.containers()) == 1
-        old_db_id = project.containers()[0].id
         container, = project.containers()
+        old_db_id = container.id
         db_volume_path = container.get_mount('/var/db')['Source']
 
         project.up(strategy=ConvergenceStrategy.never)
         assert len(project.containers()) == 2
 
-        db_container = [c for c in project.containers() if 'db' in c.name][0]
+        db_container = [c for c in project.containers() if c.name == container.name][0]
         assert db_container.id == old_db_id
         assert db_container.get_mount('/var/db')['Source'] == db_volume_path
 
@ -498,7 +499,7 @@ class ProjectTest(DockerClientTestCase):
         assert len(new_containers) == 2
         assert [c.is_running for c in new_containers] == [True, True]
 
-        db_container = [c for c in new_containers if 'db' in c.name][0]
+        db_container = [c for c in new_containers if c.service == 'db'][0]
         assert db_container.id == old_db_id
         assert db_container.get_mount('/var/db')['Source'] == db_volume_path
 
@ -1944,7 +1945,7 @@ class ProjectTest(DockerClientTestCase):
 
         containers = project.containers(stopped=True)
         assert len(containers) == 1
-        assert containers[0].name == 'underscoretest_svc1_1'
+        assert containers[0].name.startswith('underscoretest_svc1_')
         assert containers[0].project == '_underscoretest'
 
         full_vol_name = 'underscoretest_foo'
@ -1965,7 +1966,7 @@ class ProjectTest(DockerClientTestCase):
 
         containers = project2.containers(stopped=True)
         assert len(containers) == 1
-        assert containers[0].name == 'dashtest_svc1_1'
+        assert containers[0].name.startswith('dashtest_svc1_')
         assert containers[0].project == '-dashtest'
 
         full_vol_name = 'dashtest_foo'
@ -32,6 +32,7 @@ from compose.const import LABEL_CONTAINER_NUMBER
 from compose.const import LABEL_ONE_OFF
 from compose.const import LABEL_PROJECT
 from compose.const import LABEL_SERVICE
+from compose.const import LABEL_SLUG
 from compose.const import LABEL_VERSION
 from compose.container import Container
 from compose.errors import OperationFailedError
@ -67,7 +68,7 @@ class ServiceTest(DockerClientTestCase):
         create_and_start_container(foo)
 
         assert len(foo.containers()) == 1
-        assert foo.containers()[0].name == 'composetest_foo_1'
+        assert foo.containers()[0].name.startswith('composetest_foo_')
         assert len(bar.containers()) == 0
 
         create_and_start_container(bar)
@ -77,8 +78,8 @@ class ServiceTest(DockerClientTestCase):
         assert len(bar.containers()) == 2
 
         names = [c.name for c in bar.containers()]
-        assert 'composetest_bar_1' in names
-        assert 'composetest_bar_2' in names
+        assert len(names) == 2
+        assert all(name.startswith('composetest_bar_') for name in names)
 
     def test_containers_one_off(self):
         db = self.create_service('db')
@ -89,18 +90,18 @@ class ServiceTest(DockerClientTestCase):
     def test_project_is_added_to_container_name(self):
         service = self.create_service('web')
         create_and_start_container(service)
-        assert service.containers()[0].name == 'composetest_web_1'
+        assert service.containers()[0].name.startswith('composetest_web_')
 
     def test_create_container_with_one_off(self):
         db = self.create_service('db')
         container = db.create_container(one_off=True)
-        assert container.name == 'composetest_db_run_1'
+        assert container.name.startswith('composetest_db_run_')
 
     def test_create_container_with_one_off_when_existing_container_is_running(self):
         db = self.create_service('db')
         db.start()
         container = db.create_container(one_off=True)
-        assert container.name == 'composetest_db_run_1'
+        assert container.name.startswith('composetest_db_run_')
 
     def test_create_container_with_unspecified_volume(self):
         service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
@ -489,7 +490,7 @@ class ServiceTest(DockerClientTestCase):
         assert old_container.get('Config.Entrypoint') == ['top']
         assert old_container.get('Config.Cmd') == ['-d', '1']
         assert 'FOO=1' in old_container.get('Config.Env')
-        assert old_container.name == 'composetest_db_1'
+        assert old_container.name.startswith('composetest_db_')
         service.start_container(old_container)
         old_container.inspect() # reload volume data
         volume_path = old_container.get_mount('/etc')['Source']
@ -503,7 +504,7 @@ class ServiceTest(DockerClientTestCase):
         assert new_container.get('Config.Entrypoint') == ['top']
         assert new_container.get('Config.Cmd') == ['-d', '1']
         assert 'FOO=2' in new_container.get('Config.Env')
-        assert new_container.name == 'composetest_db_1'
+        assert new_container.name.startswith('composetest_db_')
         assert new_container.get_mount('/etc')['Source'] == volume_path
         if not is_cluster(self.client):
             assert (
@ -836,13 +837,13 @@ class ServiceTest(DockerClientTestCase):
         db = self.create_service('db')
         web = self.create_service('web', links=[(db, None)])
 
-        create_and_start_container(db)
-        create_and_start_container(db)
+        db1 = create_and_start_container(db)
+        db2 = create_and_start_container(db)
         create_and_start_container(web)
 
         assert set(get_links(web.containers()[0])) == set([
-            'composetest_db_1', 'db_1',
-            'composetest_db_2', 'db_2',
+            db1.name, db1.name_without_project,
+            db2.name, db2.name_without_project,
             'db'
         ])
 
@ -851,30 +852,33 @@ class ServiceTest(DockerClientTestCase):
         db = self.create_service('db')
         web = self.create_service('web', links=[(db, 'custom_link_name')])
 
-        create_and_start_container(db)
-        create_and_start_container(db)
+        db1 = create_and_start_container(db)
+        db2 = create_and_start_container(db)
         create_and_start_container(web)
 
         assert set(get_links(web.containers()[0])) == set([
-            'composetest_db_1', 'db_1',
-            'composetest_db_2', 'db_2',
+            db1.name, db1.name_without_project,
+            db2.name, db2.name_without_project,
             'custom_link_name'
         ])
 
     @no_cluster('No legacy links support in Swarm')
     def test_start_container_with_external_links(self):
         db = self.create_service('db')
-        web = self.create_service('web', external_links=['composetest_db_1',
-                                                         'composetest_db_2',
-                                                         'composetest_db_3:db_3'])
+        db_ctnrs = [create_and_start_container(db) for _ in range(3)]
+        web = self.create_service(
+            'web', external_links=[
+                db_ctnrs[0].name,
+                db_ctnrs[1].name,
+                '{}:db_3'.format(db_ctnrs[2].name)
+            ]
+        )
 
-        for _ in range(3):
-            create_and_start_container(db)
         create_and_start_container(web)
 
         assert set(get_links(web.containers()[0])) == set([
-            'composetest_db_1',
-            'composetest_db_2',
+            db_ctnrs[0].name,
+            db_ctnrs[1].name,
            'db_3'
        ])
 
@ -892,14 +896,14 @@ class ServiceTest(DockerClientTestCase):
     def test_start_one_off_container_creates_links_to_its_own_service(self):
         db = self.create_service('db')
 
-        create_and_start_container(db)
-        create_and_start_container(db)
+        db1 = create_and_start_container(db)
+        db2 = create_and_start_container(db)
 
         c = create_and_start_container(db, one_off=OneOffFilter.only)
 
         assert set(get_links(c)) == set([
-            'composetest_db_1', 'db_1',
-            'composetest_db_2', 'db_2',
+            db1.name, db1.name_without_project,
+            db2.name, db2.name_without_project,
             'db'
         ])
 
@ -1249,10 +1253,9 @@ class ServiceTest(DockerClientTestCase):
         test that those containers are restarted and not removed/recreated.
         """
         service = self.create_service('web')
-        next_number = service._next_container_number()
-        valid_numbers = [next_number, next_number + 1]
-        service.create_container(number=next_number)
-        service.create_container(number=next_number + 1)
+        valid_numbers = [service._next_container_number(), service._next_container_number()]
+        service.create_container(number=valid_numbers[0])
+        service.create_container(number=valid_numbers[1])
 
         ParallelStreamWriter.instance = None
         with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
@ -1310,10 +1313,8 @@ class ServiceTest(DockerClientTestCase):
 
         assert len(service.containers()) == 1
         assert service.containers()[0].is_running
-        assert (
-            "ERROR: for composetest_web_2 Cannot create container for service"
-            " web: Boom" in mock_stderr.getvalue()
-        )
+        assert "ERROR: for composetest_web_" in mock_stderr.getvalue()
+        assert "Cannot create container for service web: Boom" in mock_stderr.getvalue()
 
     def test_scale_with_unexpected_exception(self):
         """Test that when scaling if the API returns an error, that is not of type
@ -1580,18 +1581,20 @@ class ServiceTest(DockerClientTestCase):
         }
 
         compose_labels = {
-            LABEL_CONTAINER_NUMBER: '1',
             LABEL_ONE_OFF: 'False',
             LABEL_PROJECT: 'composetest',
             LABEL_SERVICE: 'web',
             LABEL_VERSION: __version__,
+            LABEL_CONTAINER_NUMBER: '1'
         }
         expected = dict(labels_dict, **compose_labels)
 
         service = self.create_service('web', labels=labels_dict)
-        labels = create_and_start_container(service).labels.items()
+        ctnr = create_and_start_container(service)
+        labels = ctnr.labels.items()
         for pair in expected.items():
             assert pair in labels
+        assert ctnr.labels[LABEL_SLUG] == ctnr.full_slug
 
     def test_empty_labels(self):
         labels_dict = {'foo': '', 'bar': ''}
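After this change each container also records its slug in a dedicated label, checked separately because the slug is random per container. A minimal sketch of the resulting label set (constants as imported above; values illustrative):

    from compose import __version__
    from compose.const import (
        LABEL_CONTAINER_NUMBER, LABEL_ONE_OFF, LABEL_PROJECT,
        LABEL_SERVICE, LABEL_SLUG, LABEL_VERSION
    )

    # Illustrative values only.
    compose_labels = {
        LABEL_ONE_OFF: 'False',
        LABEL_PROJECT: 'composetest',
        LABEL_SERVICE: 'web',
        LABEL_VERSION: __version__,
        LABEL_CONTAINER_NUMBER: '1',
        LABEL_SLUG: '092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52',
    }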
@ -1655,7 +1658,7 @@ class ServiceTest(DockerClientTestCase):
     def test_duplicate_containers(self):
         service = self.create_service('web')
 
-        options = service._get_container_create_options({}, 1)
+        options = service._get_container_create_options({}, service._next_container_number())
         original = Container.create(service.client, **options)
 
         assert set(service.containers(stopped=True)) == set([original])
@ -55,8 +55,8 @@ class BasicProjectTest(ProjectTestCase):
 
     def test_partial_change(self):
         old_containers = self.run_up(self.cfg)
-        old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
-        old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]
+        old_db = [c for c in old_containers if c.name_without_project.startswith('db_')][0]
+        old_web = [c for c in old_containers if c.name_without_project.startswith('web_')][0]
 
         self.cfg['web']['command'] = '/bin/true'
 
@ -71,7 +71,7 @@ class BasicProjectTest(ProjectTestCase):
 
         created = list(new_containers - old_containers)
         assert len(created) == 1
-        assert created[0].name_without_project == 'web_1'
+        assert created[0].name_without_project == old_web.name_without_project
         assert created[0].get('Config.Cmd') == ['/bin/true']
 
     def test_all_change(self):
@ -114,7 +114,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
 
     def test_up(self):
         containers = self.run_up(self.cfg)
-        assert set(c.name_without_project for c in containers) == set(['db_1', 'web_1', 'nginx_1'])
+        assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
 
     def test_change_leaf(self):
         old_containers = self.run_up(self.cfg)
@ -122,7 +122,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        assert set(c.name_without_project for c in new_containers - old_containers) == set(['nginx_1'])
+        assert set(c.service for c in new_containers - old_containers) == set(['nginx'])
 
     def test_change_middle(self):
         old_containers = self.run_up(self.cfg)
@ -130,7 +130,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        assert set(c.name_without_project for c in new_containers - old_containers) == set(['web_1'])
+        assert set(c.service for c in new_containers - old_containers) == set(['web'])
 
     def test_change_middle_always_recreate_deps(self):
         old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@ -138,8 +138,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg, always_recreate_deps=True)
 
-        assert set(c.name_without_project
-                   for c in new_containers - old_containers) == {'web_1', 'nginx_1'}
+        assert set(c.service for c in new_containers - old_containers) == {'web', 'nginx'}
 
     def test_change_root(self):
         old_containers = self.run_up(self.cfg)
@ -147,7 +146,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        assert set(c.name_without_project for c in new_containers - old_containers) == set(['db_1'])
+        assert set(c.service for c in new_containers - old_containers) == set(['db'])
 
     def test_change_root_always_recreate_deps(self):
         old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@ -155,8 +154,9 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg, always_recreate_deps=True)
 
-        assert set(c.name_without_project
-                   for c in new_containers - old_containers) == {'db_1', 'web_1', 'nginx_1'}
+        assert set(c.service for c in new_containers - old_containers) == {
+            'db', 'web', 'nginx'
+        }
 
     def test_change_root_no_recreate(self):
         old_containers = self.run_up(self.cfg)
@ -195,9 +195,18 @@ class ProjectWithDependenciesTest(ProjectTestCase):
 
         web, = [c for c in containers if c.service == 'web']
         nginx, = [c for c in containers if c.service == 'nginx']
+        db, = [c for c in containers if c.service == 'db']
 
-        assert set(get_links(web)) == {'composetest_db_1', 'db', 'db_1'}
-        assert set(get_links(nginx)) == {'composetest_web_1', 'web', 'web_1'}
+        assert set(get_links(web)) == {
+            'composetest_db_{}_{}'.format(db.number, db.slug),
+            'db',
+            'db_{}_{}'.format(db.number, db.slug)
+        }
+        assert set(get_links(nginx)) == {
+            'composetest_web_{}_{}'.format(web.number, web.slug),
+            'web',
+            'web_{}_{}'.format(web.number, web.slug)
+        }
 
 
 class ServiceStateTest(DockerClientTestCase):
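The rewritten assertions spell out the three aliases a linked container is reachable by once names carry a number and slug. An illustrative helper (not part of the codebase) that builds the same set:

    # Illustrative helper mirroring the expected alias sets above.
    def expected_link_aliases(project, service, number, slug):
        return {
            '{}_{}_{}_{}'.format(project, service, number, slug),  # full name
            service,                                               # service alias
            '{}_{}_{}'.format(service, number, slug),              # name w/o project
        }

    assert expected_link_aliases('composetest', 'db', 1, '0123456789ab') == {
        'composetest_db_1_0123456789ab', 'db', 'db_1_0123456789ab'
    }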
@ -139,7 +139,9 @@ class DockerClientTestCase(unittest.TestCase):
     def check_build(self, *args, **kwargs):
         kwargs.setdefault('rm', True)
         build_output = self.client.build(*args, **kwargs)
-        stream_output(build_output, open('/dev/null', 'w'))
+        with open(os.devnull, 'w') as devnull:
+            for event in stream_output(build_output, devnull):
+                pass
 
     def require_api_version(self, minimum):
         api_version = self.client.version()['ApiVersion']
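stream_output now behaves as a generator (hence the list(...) wrappers added to the progress-stream unit tests below), so its events must be iterated for the stream to be consumed; the with block also closes the sink, and os.devnull is portable where /dev/null is not. A minimal sketch of the pitfall the loop guards against:

    # Creating a generator runs nothing; only iterating it does the work.
    def stream(events, seen):
        for event in events:
            seen.append(event)
            yield event

    seen = []
    gen = stream(['a', 'b'], seen)
    assert seen == []      # nothing consumed yet
    for _ in gen:          # draining triggers the work
        pass
    assert seen == ['a', 'b']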
@ -1291,7 +1291,7 @@ class ConfigTest(unittest.TestCase):
         assert tmpfs_mount.target == '/tmpfs'
         assert not tmpfs_mount.is_named_volume
 
-        assert host_mount.source == os.path.normpath('/abc')
+        assert host_mount.source == '/abc'
         assert host_mount.target == '/xyz'
         assert not host_mount.is_named_volume
 
@ -5096,3 +5096,19 @@ class SerializeTest(unittest.TestCase):
         serialized_config = yaml.load(serialize_config(config_dict))
         serialized_service = serialized_config['services']['web']
         assert serialized_service['command'] == 'echo 十六夜 咲夜'
+
+    def test_serialize_external_false(self):
+        cfg = {
+            'version': '3.4',
+            'volumes': {
+                'test': {
+                    'name': 'test-false',
+                    'external': False
+                }
+            }
+        }
+
+        config_dict = config.load(build_config_details(cfg))
+        serialized_config = yaml.load(serialize_config(config_dict))
+        serialized_volume = serialized_config['volumes']['test']
+        assert serialized_volume['external'] is False
|
|||||||
"Labels": {
|
"Labels": {
|
||||||
"com.docker.compose.project": "composetest",
|
"com.docker.compose.project": "composetest",
|
||||||
"com.docker.compose.service": "web",
|
"com.docker.compose.service": "web",
|
||||||
"com.docker.compose.container-number": 7,
|
"com.docker.compose.container-number": "7",
|
||||||
|
"com.docker.compose.slug": "092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52"
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -88,7 +89,7 @@ class ContainerTest(unittest.TestCase):
|
|||||||
def test_name_without_project(self):
|
def test_name_without_project(self):
|
||||||
self.container_dict['Name'] = "/composetest_web_7"
|
self.container_dict['Name'] = "/composetest_web_7"
|
||||||
container = Container(None, self.container_dict, has_been_inspected=True)
|
container = Container(None, self.container_dict, has_been_inspected=True)
|
||||||
assert container.name_without_project == "web_7"
|
assert container.name_without_project == "web_7_092cd63296fd"
|
||||||
|
|
||||||
def test_name_without_project_custom_container_name(self):
|
def test_name_without_project_custom_container_name(self):
|
||||||
self.container_dict['Name'] = "/custom_name_of_container"
|
self.container_dict['Name'] = "/custom_name_of_container"
|
||||||
|
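The expected short name appends the slug truncated to twelve characters, echoing Docker's short-ID convention; the fixture's label holds the full value:

    # The label carries the full slug; the short name uses its first 12 chars.
    full_slug = "092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52"
    assert "web_7_" + full_slug[:12] == "web_7_092cd63296fd"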
@ -21,7 +21,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'31019763, "start": 1413653874, "total": 62763875}, '
             b'"progress": "..."}',
         ]
-        events = progress_stream.stream_output(output, StringIO())
+        events = list(progress_stream.stream_output(output, StringIO()))
         assert len(events) == 1
 
     def test_stream_output_div_zero(self):
@ -30,7 +30,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'0, "start": 1413653874, "total": 0}, '
             b'"progress": "..."}',
         ]
-        events = progress_stream.stream_output(output, StringIO())
+        events = list(progress_stream.stream_output(output, StringIO()))
         assert len(events) == 1
 
     def test_stream_output_null_total(self):
@ -39,7 +39,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'0, "start": 1413653874, "total": null}, '
             b'"progress": "..."}',
         ]
-        events = progress_stream.stream_output(output, StringIO())
+        events = list(progress_stream.stream_output(output, StringIO()))
         assert len(events) == 1
 
     def test_stream_output_progress_event_tty(self):
@ -52,7 +52,7 @@ class ProgressStreamTestCase(unittest.TestCase):
                 return True
 
         output = TTYStringIO()
-        events = progress_stream.stream_output(events, output)
+        events = list(progress_stream.stream_output(events, output))
         assert len(output.getvalue()) > 0
 
     def test_stream_output_progress_event_no_tty(self):
@ -61,7 +61,7 @@ class ProgressStreamTestCase(unittest.TestCase):
         ]
         output = StringIO()
 
-        events = progress_stream.stream_output(events, output)
+        events = list(progress_stream.stream_output(events, output))
         assert len(output.getvalue()) == 0
 
     def test_stream_output_no_progress_event_no_tty(self):
@ -70,7 +70,7 @@ class ProgressStreamTestCase(unittest.TestCase):
         ]
         output = StringIO()
 
-        events = progress_stream.stream_output(events, output)
+        events = list(progress_stream.stream_output(events, output))
         assert len(output.getvalue()) > 0
 
     def test_mismatched_encoding_stream_write(self):
@ -173,10 +173,10 @@ class ServiceTest(unittest.TestCase):
     def test_self_reference_external_link(self):
         service = Service(
             name='foo',
-            external_links=['default_foo_1']
+            external_links=['default_foo_1_bdfa3ed91e2c']
         )
         with pytest.raises(DependencyError):
-            service.get_container_name('foo', 1)
+            service.get_container_name('foo', 1, 'bdfa3ed91e2c')
 
     def test_mem_reservation(self):
         self.mock_client.create_host_config.return_value = {}
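get_container_name now takes the slug as a third argument, letting a service recognize itself in an external link that uses a slugged name. A sketch of the naming rule these calls imply (not the library's actual implementation):

    # Sketch only: assembling a slugged container name, per the tests above.
    def container_name(project, service, number, slug=None):
        bits = [project, service, str(number)]
        if slug:
            bits.append(slug[:12])  # short slug, mirroring docker short IDs
        return '_'.join(bits)

    assert container_name('default', 'foo', 1, 'bdfa3ed91e2c') == 'default_foo_1_bdfa3ed91e2c'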
@ -317,13 +317,14 @@ class ServiceTest(unittest.TestCase):
         self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
         prev_container = mock.Mock(
             id='ababab',
-            image_config={'ContainerConfig': {}})
+            image_config={'ContainerConfig': {}}
+        )
+        prev_container.full_slug = 'abcdefff1234'
         prev_container.get.return_value = None
 
         opts = service._get_container_create_options(
-            {},
-            1,
-            previous_container=prev_container)
+            {}, 1, previous_container=prev_container
+        )
 
         assert service.options['labels'] == labels
         assert service.options['environment'] == environment
@ -355,11 +356,13 @@ class ServiceTest(unittest.TestCase):
         }.get(key, None)
 
         prev_container.get.side_effect = container_get
+        prev_container.full_slug = 'abcdefff1234'
 
         opts = service._get_container_create_options(
             {},
             1,
-            previous_container=prev_container)
+            previous_container=prev_container
+        )
 
         assert opts['environment'] == ['affinity:container==ababab']
 
@ -370,6 +373,7 @@ class ServiceTest(unittest.TestCase):
             id='ababab',
             image_config={'ContainerConfig': {}})
         prev_container.get.return_value = None
+        prev_container.full_slug = 'abcdefff1234'
 
         opts = service._get_container_create_options(
             {},
@ -386,7 +390,7 @@ class ServiceTest(unittest.TestCase):
 
     @mock.patch('compose.service.Container', autospec=True)
     def test_get_container(self, mock_container_class):
-        container_dict = dict(Name='default_foo_2')
+        container_dict = dict(Name='default_foo_2_bdfa3ed91e2c')
         self.mock_client.containers.return_value = [container_dict]
         service = Service('foo', image='foo', client=self.mock_client)
 
@ -463,6 +467,7 @@ class ServiceTest(unittest.TestCase):
     @mock.patch('compose.service.Container', autospec=True)
     def test_recreate_container(self, _):
         mock_container = mock.create_autospec(Container)
+        mock_container.full_slug = 'abcdefff1234'
         service = Service('foo', client=self.mock_client, image='someimage')
         service.image = lambda: {'Id': 'abc123'}
         new_container = service.recreate_container(mock_container)
@ -476,6 +481,7 @@ class ServiceTest(unittest.TestCase):
     @mock.patch('compose.service.Container', autospec=True)
     def test_recreate_container_with_timeout(self, _):
         mock_container = mock.create_autospec(Container)
+        mock_container.full_slug = 'abcdefff1234'
         self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
         service = Service('foo', client=self.mock_client, image='someimage')
         service.recreate_container(mock_container, timeout=1)
@ -701,17 +707,19 @@ class ServiceTest(unittest.TestCase):
             image='example.com/foo',
             client=self.mock_client,
             network_mode=NetworkMode('bridge'),
-            networks={'bridge': {}},
+            networks={'bridge': {}, 'net2': {}},
             links=[(Service('one', client=self.mock_client), 'one')],
-            volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')]
+            volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')],
+            volumes=[VolumeSpec('/ext', '/int', 'ro')],
+            build={'context': 'some/random/path'},
        )
         config_hash = service.config_hash
 
         for api_version in set(API_VERSIONS.values()):
             self.mock_client.api_version = api_version
-            assert service._get_container_create_options({}, 1)['labels'][LABEL_CONFIG_HASH] == (
-                config_hash
-            )
+            assert service._get_container_create_options(
+                {}, 1
+            )['labels'][LABEL_CONFIG_HASH] == config_hash
 
     def test_remove_image_none(self):
         web = Service('web', image='example', client=self.mock_client)