Merge pull request #3698 from docker/bump-1.8.0-rc2

Bump 1.8.0 rc2

Commit 7f54850b4a by Aanand Prasad, 2016-07-06 19:11:03 -07:00, committed by GitHub.
22 changed files with 586 additions and 110 deletions.


@@ -4,6 +4,11 @@ Change log
 1.8.0 (2016-06-14)
 ------------------
 
+**Breaking Changes**
+
+- As announced in 1.7.0, `docker-compose rm` now removes containers
+  created by `docker-compose run` by default.
+
 New Features
 
 - Added `docker-compose bundle`, a command that builds a bundle file
@@ -13,9 +18,6 @@ New Features
 - Added `docker-compose push`, a command that pushes service images
   to a registry.
 
-- As announced in 1.7.0, `docker-compose rm` now removes containers
-  created by `docker-compose run` by default.
-
 - Compose now supports specifying a custom TLS version for
   interaction with the Docker Engine using the `COMPOSE_TLS_VERSION`
   environment variable.


@@ -1,5 +1,5 @@
-FROM alpine:edge
+FROM alpine:3.4
 
 RUN apk -U add \
     python \
     py-pip


@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.8.0-rc1'
+__version__ = '1.8.0-rc2'


@@ -40,35 +40,56 @@ SUPPORTED_KEYS = {
 VERSION = '0.1'
 
 
+class NeedsPush(Exception):
+    def __init__(self, image_name):
+        self.image_name = image_name
+
+
+class NeedsPull(Exception):
+    def __init__(self, image_name):
+        self.image_name = image_name
+
+
+class MissingDigests(Exception):
+    def __init__(self, needs_push, needs_pull):
+        self.needs_push = needs_push
+        self.needs_pull = needs_pull
+
+
 def serialize_bundle(config, image_digests):
-    if config.networks:
-        log.warn("Unsupported top level key 'networks' - ignoring")
-
-    if config.volumes:
-        log.warn("Unsupported top level key 'volumes' - ignoring")
-
-    return json.dumps(
-        to_bundle(config, image_digests),
-        indent=2,
-        sort_keys=True,
-    )
+    return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)
 
 
-def get_image_digests(project):
-    return {
-        service.name: get_image_digest(service)
-        for service in project.services
-    }
+def get_image_digests(project, allow_fetch=False):
+    digests = {}
+    needs_push = set()
+    needs_pull = set()
+
+    for service in project.services:
+        try:
+            digests[service.name] = get_image_digest(
+                service,
+                allow_fetch=allow_fetch,
+            )
+        except NeedsPush as e:
+            needs_push.add(e.image_name)
+        except NeedsPull as e:
+            needs_pull.add(e.image_name)
+
+    if needs_push or needs_pull:
+        raise MissingDigests(needs_push, needs_pull)
+
+    return digests
 
 
-def get_image_digest(service):
+def get_image_digest(service, allow_fetch=False):
     if 'image' not in service.options:
         raise UserError(
             "Service '{s.name}' doesn't define an image tag. An image name is "
             "required to generate a proper image digest for the bundle. Specify "
             "an image repo and tag with the 'image' option.".format(s=service))
 
-    repo, tag, separator = parse_repository_tag(service.options['image'])
+    _, _, separator = parse_repository_tag(service.options['image'])
     # Compose file already uses a digest, no lookup required
     if separator == '@':
         return service.options['image']
@@ -87,13 +108,17 @@ def get_image_digest(service):
     # digests
     return image['RepoDigests'][0]
 
+    if not allow_fetch:
+        if 'build' in service.options:
+            raise NeedsPush(service.image_name)
+        else:
+            raise NeedsPull(service.image_name)
+
+    return fetch_image_digest(service)
+
+
+def fetch_image_digest(service):
     if 'build' not in service.options:
-        log.warn(
-            "Compose needs to pull the image for '{s.name}' in order to create "
-            "a bundle. This may result in a more recent image being used. "
-            "It is recommended that you use an image tagged with a "
-            "specific version to minimize the potential "
-            "differences.".format(s=service))
         digest = service.pull()
     else:
         try:
@@ -108,21 +133,32 @@ def get_image_digest(service):
     if not digest:
         raise ValueError("Failed to get digest for %s" % service.name)
 
+    repo, _, _ = parse_repository_tag(service.options['image'])
     identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)
 
-    # Pull by digest so that image['RepoDigests'] is populated for next time
-    # and we don't have to pull/push again
-    service.client.pull(identifier)
+    # only do this if RepoDigests isn't already populated
+    image = service.image()
+    if not image['RepoDigests']:
+        # Pull by digest so that image['RepoDigests'] is populated for next time
+        # and we don't have to pull/push again
+        service.client.pull(identifier)
+        log.info("Stored digest for {}".format(service.image_name))
 
     return identifier
 
 
 def to_bundle(config, image_digests):
+    if config.networks:
+        log.warn("Unsupported top level key 'networks' - ignoring")
+
+    if config.volumes:
+        log.warn("Unsupported top level key 'volumes' - ignoring")
+
     config = denormalize_config(config)
 
     return {
-        'version': VERSION,
-        'services': {
+        'Version': VERSION,
+        'Services': {
             name: convert_service_to_bundle(
                 name,
                 service_dict,
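The net effect of the `compose.bundle` changes above is that digest resolution is now a separate, failure-aware step: nothing is pushed or pulled unless the caller opts in. A minimal sketch of how a caller can drive the new helpers (the `write_bundle` wrapper and its error handling are illustrative only, not part of this PR; `project` and `config` are the usual Compose project and loaded config objects):

    import sys

    from compose.bundle import MissingDigests, get_image_digests, serialize_bundle

    def write_bundle(project, config, path, fetch_digests=False):
        # First try with digests that are already stored locally; only hit the
        # registry when fetch_digests=True (the CLI's --fetch-digests flag).
        try:
            digests = get_image_digests(project, allow_fetch=fetch_digests)
        except MissingDigests as e:
            # needs_push: services with a `build` key whose image must be pushed.
            # needs_pull: image-only services whose digest must be pulled.
            sys.exit("Missing digests. Push: {}. Pull: {}.".format(
                sorted(e.needs_push), sorted(e.needs_pull)))

        with open(path, 'w') as f:
            f.write(serialize_bundle(config, digests))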


@@ -15,9 +15,11 @@ from . import errors
 from . import signals
 from .. import __version__
 from ..bundle import get_image_digests
+from ..bundle import MissingDigests
 from ..bundle import serialize_bundle
 from ..config import ConfigurationError
 from ..config import parse_environment
+from ..config.environment import Environment
 from ..config.serialize import serialize_config
 from ..const import DEFAULT_TIMEOUT
 from ..const import IS_WINDOWS_PLATFORM
@@ -216,26 +218,50 @@ class TopLevelCommand(object):
     def bundle(self, config_options, options):
         """
-        Generate a Docker bundle from the Compose file.
+        Generate a Distributed Application Bundle (DAB) from the Compose file.
 
-        Local images will be pushed to a Docker registry, and remote images
-        will be pulled to fetch an image digest.
+        Images must have digests stored, which requires interaction with a
+        Docker registry. If digests aren't stored for all images, you can pass
+        `--fetch-digests` to automatically fetch them. Images for services
+        with a `build` key will be pushed. Images for services without a
+        `build` key will be pulled.
 
         Usage: bundle [options]
 
         Options:
+            --fetch-digests              Automatically fetch image digests if missing
             -o, --output PATH            Path to write the bundle file to.
-                                         Defaults to "<project name>.dsb".
+                                         Defaults to "<project name>.dab".
         """
         self.project = project_from_options('.', config_options)
         compose_config = get_config_from_options(self.project_dir, config_options)
 
         output = options["--output"]
         if not output:
-            output = "{}.dsb".format(self.project.name)
+            output = "{}.dab".format(self.project.name)
 
         with errors.handle_connection_errors(self.project.client):
-            image_digests = get_image_digests(self.project)
+            try:
+                image_digests = get_image_digests(
+                    self.project,
+                    allow_fetch=options['--fetch-digests'],
+                )
+            except MissingDigests as e:
+                def list_images(images):
+                    return "\n".join(" {}".format(name) for name in sorted(images))
+
+                paras = ["Some images are missing digests."]
+
+                if e.needs_push:
+                    paras += ["The following images need to be pushed:", list_images(e.needs_push)]
+
+                if e.needs_pull:
+                    paras += ["The following images need to be pulled:", list_images(e.needs_pull)]
+
+                paras.append("If this is OK, run `docker-compose bundle --fetch-digests`.")
+
+                raise UserError("\n\n".join(paras))
 
         with open(output, 'w') as f:
             f.write(serialize_bundle(compose_config, image_digests))
@@ -866,7 +892,9 @@ def build_container_options(options, detach, command):
     }
 
     if options['-e']:
-        container_options['environment'] = parse_environment(options['-e'])
+        container_options['environment'] = Environment.from_command_line(
+            parse_environment(options['-e'])
+        )
 
     if options['--entrypoint']:
         container_options['entrypoint'] = options.get('--entrypoint')
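For reference on what ends up in the output file: the acceptance test added later in this PR pins the format down — the `.dab` is plain JSON with capitalized keys. Roughly (digests are shortened here for illustration; see `test_bundle_with_digests` below for the full values):

    # Approximate contents of "<project name>.dab" after `docker-compose bundle`
    # for a two-service project.
    {
        "Version": "0.1",
        "Services": {
            "web": {
                "Image": "dockercloud/hello-world@sha256:fe79a2cf...",
                "Networks": ["default"]
            },
            "redis": {
                "Image": "redis@sha256:a84cb8f5...",
                "Networks": ["default"]
            }
        }
    }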


@@ -60,6 +60,18 @@ class Environment(dict):
         instance.update(os.environ)
         return instance
 
+    @classmethod
+    def from_command_line(cls, parsed_env_opts):
+        result = cls()
+        for k, v in parsed_env_opts.items():
+            # Values from the command line take priority, unless they're unset
+            # in which case they take the value from the system's environment
+            if v is None and k in os.environ:
+                result[k] = os.environ[k]
+            else:
+                result[k] = v
+        return result
+
     def __getitem__(self, key):
         try:
             return super(Environment, self).__getitem__(key)
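A quick illustration of the intended behaviour, assuming a value-less `-e FOO` parses to `None` (so it falls back to the caller's environment) while an explicit `-e BAR=baz` wins — the same scenario the acceptance test `test_run_env_values_from_system` below exercises end to end:

    import os

    from compose.config.environment import Environment

    os.environ['FOO'] = 'bar'

    # `docker-compose run -e FOO -e BAR=baz ...` roughly parses to this dict:
    env = Environment.from_command_line({'FOO': None, 'BAR': 'baz'})

    assert env['FOO'] == 'bar'   # unset on the command line, taken from os.environ
    assert env['BAR'] == 'baz'   # explicit command-line value wins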


@@ -109,6 +109,18 @@ _docker_compose_build() {
 }
 
 
+_docker_compose_bundle() {
+    case "$prev" in
+        --output|-o)
+            _filedir
+            return
+            ;;
+    esac
+
+    COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) )
+}
+
+
 _docker_compose_config() {
     COMPREPLY=( $( compgen -W "--help --quiet -q --services" -- "$cur" ) )
 }
@@ -304,6 +316,18 @@ _docker_compose_pull() {
 }
 
 
+_docker_compose_push() {
+    case "$cur" in
+        -*)
+            COMPREPLY=( $( compgen -W "--help --ignore-push-failures" -- "$cur" ) )
+            ;;
+        *)
+            __docker_compose_services_all
+            ;;
+    esac
+}
+
+
 _docker_compose_restart() {
     case "$prev" in
         --timeout|-t)
@@ -455,6 +479,7 @@ _docker_compose() {
     local commands=(
         build
+        bundle
         config
         create
         down
@@ -467,6 +492,7 @@ _docker_compose() {
         port
         ps
         pull
+        push
         restart
         rm
         run


@@ -19,52 +19,49 @@
 # * @felixr docker zsh completion script : https://github.com/felixr/docker-zsh-completion
 # -------------------------------------------------------------------------
 
-# For compatibility reasons, Compose and therefore its completion supports several
-# stack compositon files as listed here, in descending priority.
-# Support for these filenames might be dropped in some future version.
-__docker-compose_compose_file() {
-    local file
-    for file in docker-compose.y{,a}ml ; do
-        [ -e $file ] && {
-            echo $file
-            return
-        }
-    done
-    echo docker-compose.yml
+__docker-compose_q() {
+    docker-compose 2>/dev/null $compose_options "$@"
 }
 
-# Extracts all service names from docker-compose.yml.
-___docker-compose_all_services_in_compose_file() {
+# All services defined in docker-compose.yml
+__docker-compose_all_services_in_compose_file() {
     local already_selected
     local -a services
     already_selected=$(echo $words | tr " " "|")
-    awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | grep -Ev "$already_selected"
+    __docker-compose_q config --services \
+        | grep -Ev "^(${already_selected})$"
 }
 
 # All services, even those without an existing container
 __docker-compose_services_all() {
     [[ $PREFIX = -* ]] && return 1
     integer ret=1
-    services=$(___docker-compose_all_services_in_compose_file)
+    services=$(__docker-compose_all_services_in_compose_file)
     _alternative "args:services:($services)" && ret=0
     return ret
 }
 
 # All services that have an entry with the given key in their docker-compose.yml section
-___docker-compose_services_with_key() {
+__docker-compose_services_with_key() {
     local already_selected
     local -a buildable
     already_selected=$(echo $words | tr " " "|")
     # flatten sections to one line, then filter lines containing the key and return section name.
-    awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' 2>/dev/null | grep -Ev "$already_selected"
+    __docker-compose_q config \
+        | sed -n -e '/^services:/,/^[^ ]/p' \
+        | sed -n 's/^  //p' \
+        | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
+        | grep " \+$1:" \
+        | cut -d: -f1 \
+        | grep -Ev "^(${already_selected})$"
 }
 
 # All services that are defined by a Dockerfile reference
 __docker-compose_services_from_build() {
     [[ $PREFIX = -* ]] && return 1
     integer ret=1
-    buildable=$(___docker-compose_services_with_key build)
+    buildable=$(__docker-compose_services_with_key build)
     _alternative "args:buildable services:($buildable)" && ret=0
     return ret
@@ -74,7 +71,7 @@ __docker-compose_services_from_build() {
 __docker-compose_services_from_image() {
     [[ $PREFIX = -* ]] && return 1
     integer ret=1
-    pullable=$(___docker-compose_services_with_key image)
+    pullable=$(__docker-compose_services_with_key image)
     _alternative "args:pullable services:($pullable)" && ret=0
     return ret
@@ -96,7 +93,7 @@ __docker-compose_get_services() {
     shift
     [[ $kind =~ (stopped|all) ]] && args=($args -a)
 
-    lines=(${(f)"$(_call_program commands docker ps $args)"})
+    lines=(${(f)"$(_call_program commands docker $docker_options ps $args)"})
     services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
 
     # Parse header line to find columns
@@ -185,7 +182,17 @@ __docker-compose_commands() {
 }
 
 __docker-compose_subcommand() {
-    local opts_help='(: -)--help[Print usage]'
+    local opts_help opts_force_recreate opts_no_recreate opts_no_build opts_remove_orphans opts_timeout opts_no_color opts_no_deps
+
+    opts_help='(: -)--help[Print usage]'
+    opts_force_recreate="(--no-recreate)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]"
+    opts_no_recreate="(--force-recreate)--no-recreate[If containers already exist, don't recreate them. Incompatible with --force-recreate.]"
+    opts_no_build="(--build)--no-build[Don't build an image, even if it's missing.]"
+    opts_remove_orphans="--remove-orphans[Remove containers for services not defined in the Compose file]"
+    opts_timeout=('(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: ")
+    opts_no_color='--no-color[Produce monochrome output.]'
+    opts_no_deps="--no-deps[Don't start linked services.]"
+
     integer ret=1
 
     case "$words[1]" in
@@ -193,10 +200,15 @@ __docker-compose_subcommand() {
             _arguments \
                 $opts_help \
                 '--force-rm[Always remove intermediate containers.]' \
-                '--no-cache[Do not use cache when building the image]' \
+                '--no-cache[Do not use cache when building the image.]' \
                 '--pull[Always attempt to pull a newer version of the image.]' \
                 '*:services:__docker-compose_services_from_build' && ret=0
             ;;
+        (bundle)
+            _arguments \
+                $opts_help \
+                '(--output -o)'{--output,-o}'[Path to write the bundle file to. Defaults to "<project name>.dab".]:file:_files' && ret=0
+            ;;
         (config)
             _arguments \
                 $opts_help \
@@ -206,21 +218,23 @@ __docker-compose_subcommand() {
         (create)
             _arguments \
                 $opts_help \
-                "(--no-recreate --no-build)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \
-                "(--force-recreate)--no-build[If containers already exist, don't recreate them. Incompatible with --force-recreate.]" \
-                "(--force-recreate)--no-recreate[Don't build an image, even if it's missing]" \
+                $opts_force_recreate \
+                $opts_no_recreate \
+                $opts_no_build \
+                "(--no-build)--build[Build images before creating containers.]" \
                 '*:services:__docker-compose_services_all' && ret=0
             ;;
         (down)
             _arguments \
                 $opts_help \
-                "--rmi[Remove images, type may be one of: 'all' to remove all images, or 'local' to remove only images that don't have an custom name set by the 'image' field]:type:(all local)" \
-                '(-v --volumes)'{-v,--volumes}"[Remove data volumes]" && ret=0
+                "--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \
+                '(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \
+                $opts_remove_orphans && ret=0
             ;;
         (events)
             _arguments \
                 $opts_help \
-                '--json[Output events as a stream of json objects.]' \
+                '--json[Output events as a stream of json objects]' \
                 '*:services:__docker-compose_services_all' && ret=0
             ;;
         (exec)
@@ -230,7 +244,7 @@ __docker-compose_subcommand() {
                 '--privileged[Give extended privileges to the process.]' \
                 '--user=[Run the command as this user.]:username:_users' \
                 '-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
-                '--index=[Index of the container if there are multiple instances of a service (default: 1)]:index: ' \
+                '--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
                 '(-):running services:__docker-compose_runningservices' \
                 '(-):command: _command_names -e' \
                 '*::arguments: _normal' && ret=0
@@ -248,7 +262,7 @@ __docker-compose_subcommand() {
             _arguments \
                 $opts_help \
                 '(-f --follow)'{-f,--follow}'[Follow log output]' \
-                '--no-color[Produce monochrome output.]' \
+                $opts_no_color \
                 '--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \
                 '(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
                 '*:services:__docker-compose_services_all' && ret=0
@@ -261,8 +275,8 @@ __docker-compose_subcommand() {
         (port)
             _arguments \
                 $opts_help \
-                '--protocol=-[tcp or udap (defaults to tcp)]:protocol:(tcp udp)' \
-                '--index=-[index of the container if there are mutiple instances of a service (defaults to 1)]:index: ' \
+                '--protocol=[tcp or udp \[default: tcp\]]:protocol:(tcp udp)' \
+                '--index=[index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
                 '1:running services:__docker-compose_runningservices' \
                 '2:port:_ports' && ret=0
             ;;
@@ -278,11 +292,17 @@ __docker-compose_subcommand() {
                 '--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \
                 '*:services:__docker-compose_services_from_image' && ret=0
             ;;
+        (push)
+            _arguments \
+                $opts_help \
+                '--ignore-push-failures[Push what it can and ignores images with push failures.]' \
+                '*:services:__docker-compose_services' && ret=0
+            ;;
         (rm)
             _arguments \
                 $opts_help \
                 '(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \
-                '-v[Remove volumes associated with containers]' \
+                '-v[Remove any anonymous volumes attached to containers]' \
                 '*:stopped services:__docker-compose_stoppedservices' && ret=0
             ;;
         (run)
@@ -291,14 +311,14 @@ __docker-compose_subcommand() {
                 '-d[Detached mode: Run container in the background, print new container name.]' \
                 '*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
                 '--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
-                '--name[Assign a name to the container]:name: ' \
-                "--no-deps[Don't start linked services.]" \
-                '(-p --publish)'{-p,--publish=-}"[Run command with manually mapped container's port(s) to the host.]" \
+                '--name=[Assign a name to the container]:name: ' \
+                $opts_no_deps \
+                '(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
                 '--rm[Remove container after run. Ignored in detached mode.]' \
                 "--service-ports[Run command with the service's ports enabled and mapped to the host.]" \
                 '-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \
-                '(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \
-                '(-w --workdir)'{-w=,--workdir=}'[Working directory inside the container]:workdir: ' \
+                '(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
+                '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
                 '(-):services:__docker-compose_services' \
                 '(-):command: _command_names -e' \
                 '*::arguments: _normal' && ret=0
@@ -306,7 +326,7 @@ __docker-compose_subcommand() {
         (scale)
             _arguments \
                 $opts_help \
-                '(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
+                $opts_timeout \
                 '*:running services:__docker-compose_runningservices' && ret=0
             ;;
         (start)
(start) (start)
@ -317,7 +337,7 @@ __docker-compose_subcommand() {
(stop|restart) (stop|restart)
_arguments \ _arguments \
$opts_help \ $opts_help \
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \ $opts_timeout \
'*:running services:__docker-compose_runningservices' && ret=0 '*:running services:__docker-compose_runningservices' && ret=0
;; ;;
(unpause) (unpause)
@@ -328,15 +348,16 @@ __docker-compose_subcommand() {
         (up)
             _arguments \
                 $opts_help \
-                '(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names.]' \
-                '--build[Build images before starting containers.]' \
-                '--no-color[Produce monochrome output.]' \
-                "--no-deps[Don't start linked services.]" \
-                "--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \
-                "--no-recreate[If containers already exist, don't recreate them.]" \
-                "--no-build[Don't build an image, even if it's missing]" \
+                '(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit.]' \
+                $opts_no_color \
+                $opts_no_deps \
+                $opts_force_recreate \
+                $opts_no_recreate \
+                $opts_no_build \
+                "(--no-build)--build[Build images before starting containers.]" \
                 "(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
-                '(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
+                '(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
+                $opts_remove_orphans \
                 '*:services:__docker-compose_services_all' && ret=0
             ;;
         (version)
@@ -366,16 +387,57 @@ _docker-compose() {
     _arguments -C \
         '(- :)'{-h,--help}'[Get help]' \
+        '--verbose[Show more output]' \
+        '(- :)'{-v,--version}'[Print version and exit]' \
         '(-f --file)'{-f,--file}'[Specify an alternate docker-compose file (default: docker-compose.yml)]:file:_files -g "*.yml"' \
         '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-        '--verbose[Show more output]' \
-        '(- :)'{-v,--version}'[Print version and exit]' \
+        '(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \
+        '--tls[Use TLS; implied by --tlsverify]' \
+        '--tlscacert=[Trust certs signed only by this CA]:ca path:' \
+        '--tlscert=[Path to TLS certificate file]:client cert path:' \
+        '--tlskey=[Path to TLS key file]:tls key path:' \
+        '--tlsverify[Use TLS and verify the remote]' \
+        "--skip-hostname-check[Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)]" \
         '(-): :->command' \
         '(-)*:: :->option-or-argument' && ret=0
 
-    local compose_file=${opt_args[-f]}${opt_args[--file]}
-    local compose_project=${opt_args[-p]}${opt_args[--project-name]}
-    local compose_options="${compose_file:+--file $compose_file} ${compose_project:+--project-name $compose_project}"
+    local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
+
+    relevant_compose_flags=(
+        "--file" "-f"
+        "--host" "-H"
+        "--project-name" "-p"
+        "--tls"
+        "--tlscacert"
+        "--tlscert"
+        "--tlskey"
+        "--tlsverify"
+        "--skip-hostname-check"
+    )
+
+    relevant_docker_flags=(
+        "--host" "-H"
+        "--tls"
+        "--tlscacert"
+        "--tlscert"
+        "--tlskey"
+        "--tlsverify"
+    )
+
+    for k in "${(@k)opt_args}"; do
+        if [[ -n "${relevant_docker_flags[(r)$k]}" ]]; then
+            docker_options+=$k
+            if [[ -n "$opt_args[$k]" ]]; then
+                docker_options+=$opt_args[$k]
+            fi
+        fi
+        if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
+            compose_options+=$k
+            if [[ -n "$opt_args[$k]" ]]; then
+                compose_options+=$opt_args[$k]
+            fi
+        fi
+    done
 
     case $state in
         (command)


@@ -29,7 +29,7 @@ and a `docker-compose.yml` file.
 The Dockerfile defines an application's image content via one or more build
 commands that configure that image. Once built, you can run the image in a
 container. For more information on `Dockerfiles`, see the [Docker user
-guide](/engine/userguide/containers/dockerimages.md#building-an-image-from-a-dockerfile)
+guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile)
 and the [Dockerfile reference](/engine/reference/builder.md).
 
 3. Add the following content to the `Dockerfile`.


@@ -77,7 +77,7 @@ dependencies the Python application requires, including Python itself.
 * Install the Python dependencies.
 * Set the default command for the container to `python app.py`
 
-For more information on how to write Dockerfiles, see the [Docker user guide](/engine/userguide/containers/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
+For more information on how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
 
 2. Build the image.


@@ -39,7 +39,7 @@ which the release page specifies, in your terminal.
 
     The following is an example command illustrating the format:
 
-        curl -L https://github.com/docker/compose/releases/download/1.8.0-rc1/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+        curl -L https://github.com/docker/compose/releases/download/1.8.0-rc2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
 
     If you have problems installing with `curl`, see
     [Alternative Install Options](#alternative-install-options).
@@ -54,7 +54,7 @@ which the release page specifies, in your terminal.
 7. Test the installation.
 
         $ docker-compose --version
-        docker-compose version: 1.8.0-rc1
+        docker-compose version: 1.8.0-rc2
 
 ## Alternative install options
@@ -77,7 +77,7 @@ to get started.
 Compose can also be run inside a container, from a small bash script wrapper.
 To install compose as a container run:
 
-    $ curl -L https://github.com/docker/compose/releases/download/1.8.0-rc1/run.sh > /usr/local/bin/docker-compose
+    $ curl -L https://github.com/docker/compose/releases/download/1.8.0-rc2/run.sh > /usr/local/bin/docker-compose
     $ chmod +x /usr/local/bin/docker-compose
 
 ## Master builds


@@ -32,7 +32,7 @@ Dockerfile consists of:
 
 That'll put your application code inside an image that will build a container
 with Ruby, Bundler and all your dependencies inside it. For more information on
-how to write Dockerfiles, see the [Docker user guide](/engine/userguide/containers/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
+how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
 
 Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`.


@@ -1,9 +1,12 @@
 PyYAML==3.11
+backports.ssl-match-hostname==3.5.0.1; python_version < '3'
 cached-property==1.2.0
-docker-py==1.8.1
+docker-py==1.9.0rc2
 dockerpty==0.4.1
 docopt==0.6.1
-enum34==1.0.4
+enum34==1.0.4; python_version < '3.4'
+functools32==3.2.3.post2; python_version < '3.2'
+ipaddress==1.0.16
 jsonschema==2.5.1
 requests==2.7.0
 six==1.7.3


@@ -15,7 +15,7 @@
 
 set -e
 
-VERSION="1.8.0-rc1"
+VERSION="1.8.0-rc2"
 IMAGE="docker/compose:$VERSION"


@@ -6,5 +6,5 @@ if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
     tox -e py27,py34 -- tests/unit
 else
     # TODO: we could also install py34 and test against it
-    python -m tox -e py27 -- tests/unit
+    tox -e py27 -- tests/unit
 fi


@@ -5,5 +5,6 @@ set -ex
 if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
     pip install tox==2.1.1
 else
-    pip install --user tox==2.1.1
+    sudo pip install --upgrade pip tox==2.1.1 virtualenv
+    pip --version
 fi


@@ -34,7 +34,7 @@ install_requires = [
     'requests >= 2.6.1, < 2.8',
     'texttable >= 0.8.1, < 0.9',
     'websocket-client >= 0.32.0, < 1.0',
-    'docker-py >= 1.8.1, < 2',
+    'docker-py == 1.9.0rc2',
     'dockerpty >= 0.4.1, < 0.5',
     'six >= 1.3.0, < 2',
     'jsonschema >= 2.5.1, < 3',


@@ -12,6 +12,7 @@ from collections import Counter
 from collections import namedtuple
 from operator import attrgetter
 
+import py
 import yaml
 from docker import errors
 
@@ -378,6 +379,32 @@ class CLITestCase(DockerClientTestCase):
         ]
         assert not containers
 
+    def test_bundle_with_digests(self):
+        self.base_dir = 'tests/fixtures/bundle-with-digests/'
+        tmpdir = py.test.ensuretemp('cli_test_bundle')
+        self.addCleanup(tmpdir.remove)
+        filename = str(tmpdir.join('example.dab'))
+
+        self.dispatch(['bundle', '--output', filename])
+        with open(filename, 'r') as fh:
+            bundle = json.load(fh)
+
+        assert bundle == {
+            'Version': '0.1',
+            'Services': {
+                'web': {
+                    'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
+                              '44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
+                    'Networks': ['default'],
+                },
+                'redis': {
+                    'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
+                              '374b2b7392de1e7d77be26ef8f7b'),
+                    'Networks': ['default'],
+                }
+            },
+        }
+
     def test_create(self):
         self.dispatch(['create'])
         service = self.project.get_service('simple')
@@ -1135,7 +1162,10 @@ class CLITestCase(DockerClientTestCase):
         ]
 
         for _, config in networks.items():
-            assert not config['Aliases']
+            # TODO: once we drop support for API <1.24, this can be changed to:
+            # assert config['Aliases'] == [container.short_id]
+            aliases = set(config['Aliases'] or []) - set([container.short_id])
+            assert not aliases
 
     @v2_only()
     def test_run_detached_connects_to_network(self):
@@ -1152,7 +1182,10 @@ class CLITestCase(DockerClientTestCase):
         ]
 
         for _, config in networks.items():
-            assert not config['Aliases']
+            # TODO: once we drop support for API <1.24, this can be changed to:
+            # assert config['Aliases'] == [container.short_id]
+            aliases = set(config['Aliases'] or []) - set([container.short_id])
+            assert not aliases
 
         assert self.lookup(container, 'app')
         assert self.lookup(container, 'db')
@@ -1183,6 +1216,18 @@ class CLITestCase(DockerClientTestCase):
             'simplecomposefile_simple_run_1',
             'exited'))
 
+    @mock.patch.dict(os.environ)
+    def test_run_env_values_from_system(self):
+        os.environ['FOO'] = 'bar'
+        os.environ['BAR'] = 'baz'
+
+        self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None)
+
+        container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
+        environment = container.get('Config.Env')
+        assert 'FOO=bar' in environment
+        assert 'BAR=baz' not in environment
+
     def test_rm(self):
         service = self.project.get_service('simple')
         service.create_container()


@@ -0,0 +1,9 @@
version: '2.0'

services:
  web:
    image: dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d

  redis:
    image: redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d374b2b7392de1e7d77be26ef8f7b


@@ -397,7 +397,7 @@ class ServiceTest(DockerClientTestCase):
 
         assert not mock_log.warn.called
         assert (
-            [mount['Destination'] for mount in new_container.get('Mounts')],
+            [mount['Destination'] for mount in new_container.get('Mounts')] ==
             ['/data']
         )
         assert new_container.get_mount('/data')['Source'] != host_path

tests/unit/bundle_test.py (new file, 232 lines)

@@ -0,0 +1,232 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import docker
import mock
import pytest

from compose import bundle
from compose import service
from compose.cli.errors import UserError
from compose.config.config import Config


@pytest.fixture
def mock_service():
    return mock.create_autospec(
        service.Service,
        client=mock.create_autospec(docker.Client),
        options={})


def test_get_image_digest_exists(mock_service):
    mock_service.options['image'] = 'abcd'
    mock_service.image.return_value = {'RepoDigests': ['digest1']}
    digest = bundle.get_image_digest(mock_service)
    assert digest == 'digest1'


def test_get_image_digest_image_uses_digest(mock_service):
    mock_service.options['image'] = image_id = 'redis@sha256:digest'

    digest = bundle.get_image_digest(mock_service)
    assert digest == image_id
    assert not mock_service.image.called


def test_get_image_digest_no_image(mock_service):
    with pytest.raises(UserError) as exc:
        bundle.get_image_digest(service.Service(name='theservice'))

    assert "doesn't define an image tag" in exc.exconly()


def test_fetch_image_digest_for_image_with_saved_digest(mock_service):
    mock_service.options['image'] = image_id = 'abcd'
    mock_service.pull.return_value = expected = 'sha256:thedigest'
    mock_service.image.return_value = {'RepoDigests': ['digest1']}

    digest = bundle.fetch_image_digest(mock_service)
    assert digest == image_id + '@' + expected

    mock_service.pull.assert_called_once_with()
    assert not mock_service.push.called
    assert not mock_service.client.pull.called


def test_fetch_image_digest_for_image(mock_service):
    mock_service.options['image'] = image_id = 'abcd'
    mock_service.pull.return_value = expected = 'sha256:thedigest'
    mock_service.image.return_value = {'RepoDigests': []}

    digest = bundle.fetch_image_digest(mock_service)
    assert digest == image_id + '@' + expected

    mock_service.pull.assert_called_once_with()
    assert not mock_service.push.called
    mock_service.client.pull.assert_called_once_with(digest)


def test_fetch_image_digest_for_build(mock_service):
    mock_service.options['build'] = '.'
    mock_service.options['image'] = image_id = 'abcd'
    mock_service.push.return_value = expected = 'sha256:thedigest'
    mock_service.image.return_value = {'RepoDigests': ['digest1']}

    digest = bundle.fetch_image_digest(mock_service)
    assert digest == image_id + '@' + expected

    mock_service.push.assert_called_once_with()
    assert not mock_service.pull.called
    assert not mock_service.client.pull.called


def test_to_bundle():
    image_digests = {'a': 'aaaa', 'b': 'bbbb'}
    services = [
        {'name': 'a', 'build': '.', },
        {'name': 'b', 'build': './b'},
    ]
    config = Config(
        version=2,
        services=services,
        volumes={'special': {}},
        networks={'extra': {}})

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        output = bundle.to_bundle(config, image_digests)

    assert mock_log.mock_calls == [
        mock.call("Unsupported top level key 'networks' - ignoring"),
        mock.call("Unsupported top level key 'volumes' - ignoring"),
    ]

    assert output == {
        'Version': '0.1',
        'Services': {
            'a': {'Image': 'aaaa', 'Networks': ['default']},
            'b': {'Image': 'bbbb', 'Networks': ['default']},
        }
    }


def test_convert_service_to_bundle():
    name = 'theservice'
    image_digest = 'thedigest'
    service_dict = {
        'ports': ['80'],
        'expose': ['1234'],
        'networks': {'extra': {}},
        'command': 'foo',
        'entrypoint': 'entry',
        'environment': {'BAZ': 'ENV'},
        'build': '.',
        'working_dir': '/tmp',
        'user': 'root',
        'labels': {'FOO': 'LABEL'},
        'privileged': True,
    }

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        config = bundle.convert_service_to_bundle(name, service_dict, image_digest)

    mock_log.assert_called_once_with(
        "Unsupported key 'privileged' in services.theservice - ignoring")

    assert config == {
        'Image': image_digest,
        'Ports': [
            {'Protocol': 'tcp', 'Port': 80},
            {'Protocol': 'tcp', 'Port': 1234},
        ],
        'Networks': ['extra'],
        'Command': ['entry', 'foo'],
        'Env': ['BAZ=ENV'],
        'WorkingDir': '/tmp',
        'User': 'root',
        'Labels': {'FOO': 'LABEL'},
    }


def test_set_command_and_args_none():
    config = {}
    bundle.set_command_and_args(config, [], [])
    assert config == {}


def test_set_command_and_args_from_command():
    config = {}
    bundle.set_command_and_args(config, [], "echo ok")
    assert config == {'Args': ['echo', 'ok']}


def test_set_command_and_args_from_entrypoint():
    config = {}
    bundle.set_command_and_args(config, "echo entry", [])
    assert config == {'Command': ['echo', 'entry']}


def test_set_command_and_args_from_both():
    config = {}
    bundle.set_command_and_args(config, "echo entry", ["extra", "arg"])
    assert config == {'Command': ['echo', 'entry', "extra", "arg"]}


def test_make_service_networks_default():
    name = 'theservice'
    service_dict = {}

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        networks = bundle.make_service_networks(name, service_dict)

    assert not mock_log.called
    assert networks == ['default']


def test_make_service_networks():
    name = 'theservice'
    service_dict = {
        'networks': {
            'foo': {
                'aliases': ['one', 'two'],
            },
            'bar': {}
        },
    }

    with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
        networks = bundle.make_service_networks(name, service_dict)

    mock_log.assert_called_once_with(
        "Unsupported key 'aliases' in services.theservice.networks.foo - ignoring")

    assert sorted(networks) == sorted(service_dict['networks'])


def test_make_port_specs():
    service_dict = {
        'expose': ['80', '500/udp'],
        'ports': [
            '400:80',
            '222',
            '127.0.0.1:8001:8001',
            '127.0.0.1:5000-5001:3000-3001'],
    }

    port_specs = bundle.make_port_specs(service_dict)
    assert port_specs == [
        {'Protocol': 'tcp', 'Port': 80},
        {'Protocol': 'tcp', 'Port': 222},
        {'Protocol': 'tcp', 'Port': 8001},
        {'Protocol': 'tcp', 'Port': 3000},
        {'Protocol': 'tcp', 'Port': 3001},
        {'Protocol': 'udp', 'Port': 500},
    ]


def test_make_port_spec_with_protocol():
    port_spec = bundle.make_port_spec("5000/udp")
    assert port_spec == {'Protocol': 'udp', 'Port': 5000}


def test_make_port_spec_default_protocol():
    port_spec = bundle.make_port_spec("50000")
    assert port_spec == {'Protocol': 'tcp', 'Port': 50000}


@@ -65,3 +65,23 @@ class ProgressStreamTestCase(unittest.TestCase):
         events = progress_stream.stream_output(events, output)
         self.assertTrue(len(output.getvalue()) > 0)
 
+
+def test_get_digest_from_push():
+    digest = "sha256:abcd"
+    events = [
+        {"status": "..."},
+        {"status": "..."},
+        {"progressDetail": {}, "aux": {"Digest": digest}},
+    ]
+    assert progress_stream.get_digest_from_push(events) == digest
+
+
+def test_get_digest_from_pull():
+    digest = "sha256:abcd"
+    events = [
+        {"status": "..."},
+        {"status": "..."},
+        {"status": "Digest: %s" % digest},
+    ]
+    assert progress_stream.get_digest_from_pull(events) == digest