Merge pull request #3772 from docker/bump-1.8.0-rc3

Bump 1.8.0-rc3
Authored by Joffrey F; committed by GitHub on 2016-07-27 11:50:24 -07:00
commit 1bf0cd07de
30 changed files with 585 additions and 121 deletions

View File

@ -9,11 +9,15 @@ Change log
- As announced in 1.7.0, `docker-compose rm` now removes containers
created by `docker-compose run` by default.
- Setting `entrypoint` on a service now empties out any default
command that was set on the image (i.e. any `CMD` instruction in the
Dockerfile used to build it). This makes it consistent with
the `--entrypoint` flag to `docker run`.
New Features
- Added `docker-compose bundle`, a command that builds a bundle file
to be consumed by the new *Docker Stack* commands in Docker 1.12.
This command automatically pushes and pulls images as needed.
- Added `docker-compose push`, a command that pushes service images
to a registry.
@ -27,6 +31,9 @@ Bug Fixes
- Fixed a bug where Compose would erroneously try to read `.env`
at the project's root when it is a directory.
- `docker-compose run -e VAR` now passes `VAR` through from the shell
to the container, as with `docker run -e VAR`.
- Improved config merging when multiple compose files are involved
for several service sub-keys.
@ -52,6 +59,9 @@ Bug Fixes
- Fixed a bug where errors during `docker-compose up` would show
an unrelated stacktrace at the end of the process.
- `docker-compose create` and `docker-compose start` show more
descriptive error messages when something goes wrong.
1.7.1 (2016-05-04)
-----------------

View File

@ -1,4 +1,4 @@
from __future__ import absolute_import
from __future__ import unicode_literals
__version__ = '1.8.0-rc2'
__version__ = '1.8.0-rc3'

View File

@ -60,7 +60,7 @@ def serialize_bundle(config, image_digests):
return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)
def get_image_digests(project, allow_fetch=False):
def get_image_digests(project, allow_push=False):
digests = {}
needs_push = set()
needs_pull = set()
@ -69,7 +69,7 @@ def get_image_digests(project, allow_fetch=False):
try:
digests[service.name] = get_image_digest(
service,
allow_fetch=allow_fetch,
allow_push=allow_push,
)
except NeedsPush as e:
needs_push.add(e.image_name)
@ -82,7 +82,7 @@ def get_image_digests(project, allow_fetch=False):
return digests
def get_image_digest(service, allow_fetch=False):
def get_image_digest(service, allow_push=False):
if 'image' not in service.options:
raise UserError(
"Service '{s.name}' doesn't define an image tag. An image name is "
@ -108,27 +108,24 @@ def get_image_digest(service, allow_fetch=False):
# digests
return image['RepoDigests'][0]
if not allow_fetch:
if 'build' in service.options:
raise NeedsPush(service.image_name)
else:
raise NeedsPull(service.image_name)
return fetch_image_digest(service)
def fetch_image_digest(service):
if 'build' not in service.options:
digest = service.pull()
else:
try:
digest = service.push()
except:
log.error(
"Failed to push image for service '{s.name}'. Please use an "
"image tag that can be pushed to a Docker "
"registry.".format(s=service))
raise
raise NeedsPull(service.image_name)
if not allow_push:
raise NeedsPush(service.image_name)
return push_image(service)
def push_image(service):
try:
digest = service.push()
except:
log.error(
"Failed to push image for service '{s.name}'. Please use an "
"image tag that can be pushed to a Docker "
"registry.".format(s=service))
raise
if not digest:
raise ValueError("Failed to get digest for %s" % service.name)
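
The `NeedsPush`, `NeedsPull`, and `MissingDigests` exceptions this module relies on are not part of the hunk above. A minimal sketch of what they must carry, inferred only from how they are used here and in `cli/main.py` further down (not copied from `compose/bundle.py`):

```python
# Hedged sketch: class shapes inferred from usage, not the actual definitions.

class NeedsPush(Exception):
    def __init__(self, image_name):
        self.image_name = image_name


class NeedsPull(Exception):
    def __init__(self, image_name):
        self.image_name = image_name


class MissingDigests(Exception):
    def __init__(self, needs_push, needs_pull):
        self.needs_push = needs_push  # image names for services with a `build` key
        self.needs_pull = needs_pull  # image names that only need `docker-compose pull`
```

`get_image_digests` presumably raises `MissingDigests(needs_push, needs_pull)` once it has walked all services and either set is non-empty; that part of the function is elided by the hunk.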

View File

@ -10,6 +10,7 @@ from docker.utils import kwargs_from_env
from ..const import HTTP_TIMEOUT
from .errors import UserError
from .utils import generate_user_agent
log = logging.getLogger(__name__)
@ -45,10 +46,6 @@ def docker_client(environment, version=None, tls_config=None, host=None,
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
if 'DOCKER_CLIENT_TIMEOUT' in environment:
log.warn("The DOCKER_CLIENT_TIMEOUT environment variable is deprecated. "
"Please use COMPOSE_HTTP_TIMEOUT instead.")
try:
kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
except TLSParameterError:
@ -71,4 +68,6 @@ def docker_client(environment, version=None, tls_config=None, host=None,
else:
kwargs['timeout'] = HTTP_TIMEOUT
kwargs['user_agent'] = generate_user_agent()
return Client(**kwargs)
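
A minimal usage sketch of the reworked client factory, assuming only what this hunk and the unit tests near the end of the diff show (`COMPOSE_HTTP_TIMEOUT` wins over the 60-second default, and the client carries the generated `User-Agent`):

```python
import os

from compose.cli.docker_client import docker_client

os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'   # DOCKER_CLIENT_TIMEOUT is no longer consulted here

client = docker_client(os.environ)
print(client.timeout)                  # 123
print(client.headers['User-Agent'])    # e.g. "docker-compose/1.8.0-rc3 docker-py/1.9.0 Linux/4.4.0"
```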

View File

@ -13,8 +13,8 @@ from requests.exceptions import SSLError
from requests.packages.urllib3.exceptions import ReadTimeoutError
from ..const import API_VERSION_TO_ENGINE_VERSION
from ..const import HTTP_TIMEOUT
from .utils import call_silently
from .utils import is_docker_for_mac_installed
from .utils import is_mac
from .utils import is_ubuntu
@ -46,18 +46,9 @@ def handle_connection_errors(client):
raise ConnectionError()
except RequestsConnectionError as e:
if e.args and isinstance(e.args[0], ReadTimeoutError):
log_timeout_error()
log_timeout_error(client.timeout)
raise ConnectionError()
if call_silently(['which', 'docker']) != 0:
if is_mac():
exit_with_error(docker_not_found_mac)
if is_ubuntu():
exit_with_error(docker_not_found_ubuntu)
exit_with_error(docker_not_found_generic)
if call_silently(['which', 'docker-machine']) == 0:
exit_with_error(conn_error_docker_machine)
exit_with_error(conn_error_generic.format(url=client.base_url))
exit_with_error(get_conn_error_message(client.base_url))
except APIError as e:
log_api_error(e, client.api_version)
raise ConnectionError()
@ -66,13 +57,13 @@ def handle_connection_errors(client):
raise ConnectionError()
def log_timeout_error():
def log_timeout_error(timeout):
log.error(
"An HTTP request took too long to complete. Retry with --verbose to "
"obtain debug information.\n"
"If you encounter this issue regularly because of slow network "
"conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
"value (current value: %s)." % HTTP_TIMEOUT)
"value (current value: %s)." % timeout)
def log_api_error(e, client_version):
@ -97,6 +88,20 @@ def exit_with_error(msg):
raise ConnectionError()
def get_conn_error_message(url):
if call_silently(['which', 'docker']) != 0:
if is_mac():
return docker_not_found_mac
if is_ubuntu():
return docker_not_found_ubuntu
return docker_not_found_generic
if is_docker_for_mac_installed():
return conn_error_docker_for_mac
if call_silently(['which', 'docker-machine']) == 0:
return conn_error_docker_machine
return conn_error_generic.format(url=url)
docker_not_found_mac = """
Couldn't connect to Docker daemon. You might need to install Docker:
@ -122,6 +127,10 @@ conn_error_docker_machine = """
Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
"""
conn_error_docker_for_mac = """
Couldn't connect to Docker daemon. You might need to start Docker for Mac.
"""
conn_error_generic = """
Couldn't connect to Docker daemon at {url} - is it running?

View File

@ -32,6 +32,7 @@ from ..service import BuildError
from ..service import ConvergenceStrategy
from ..service import ImageType
from ..service import NeedsBuildError
from ..service import OperationFailedError
from .command import get_config_from_options
from .command import project_from_options
from .docopt_command import DocoptDispatcher
@ -61,7 +62,8 @@ def main():
except (KeyboardInterrupt, signals.ShutdownException):
log.error("Aborting.")
sys.exit(1)
except (UserError, NoSuchService, ConfigurationError, ProjectError) as e:
except (UserError, NoSuchService, ConfigurationError,
ProjectError, OperationFailedError) as e:
log.error(e.msg)
sys.exit(1)
except BuildError as e:
@ -221,15 +223,16 @@ class TopLevelCommand(object):
Generate a Distributed Application Bundle (DAB) from the Compose file.
Images must have digests stored, which requires interaction with a
Docker registry. If digests aren't stored for all images, you can pass
`--fetch-digests` to automatically fetch them. Images for services
with a `build` key will be pushed. Images for services without a
`build` key will be pulled.
Docker registry. If digests aren't stored for all images, you can fetch
them with `docker-compose pull` or `docker-compose push`. To push images
automatically when bundling, pass `--push-images`. Only services with
a `build` option specified will have their images pushed.
Usage: bundle [options]
Options:
--fetch-digests Automatically fetch image digests if missing
--push-images Automatically push images for any services
which have a `build` option specified.
-o, --output PATH Path to write the bundle file to.
Defaults to "<project name>.dab".
@ -245,7 +248,7 @@ class TopLevelCommand(object):
try:
image_digests = get_image_digests(
self.project,
allow_fetch=options['--fetch-digests'],
allow_push=options['--push-images'],
)
except MissingDigests as e:
def list_images(images):
@ -254,12 +257,28 @@ class TopLevelCommand(object):
paras = ["Some images are missing digests."]
if e.needs_push:
paras += ["The following images need to be pushed:", list_images(e.needs_push)]
command_hint = (
"Use `docker-compose push {}` to push them. "
"You can do this automatically with `docker-compose bundle --push-images`."
.format(" ".join(sorted(e.needs_push)))
)
paras += [
"The following images can be pushed:",
list_images(e.needs_push),
command_hint,
]
if e.needs_pull:
paras += ["The following images need to be pulled:", list_images(e.needs_pull)]
command_hint = (
"Use `docker-compose pull {}` to pull them. "
.format(" ".join(sorted(e.needs_pull)))
)
paras.append("If this is OK, run `docker-compose bundle --fetch-digests`.")
paras += [
"The following images need to be pulled:",
list_images(e.needs_pull),
command_hint,
]
raise UserError("\n\n".join(paras))
@ -668,8 +687,10 @@ class TopLevelCommand(object):
'cannot be used together'
)
if options['COMMAND']:
if options['COMMAND'] is not None:
command = [options['COMMAND']] + options['ARGS']
elif options['--entrypoint'] is not None:
command = []
else:
command = service.options.get('command')
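
The effect of this change, restated as a standalone sketch (the `options` dict is the docopt result used above; `service_command` stands in for `service.options.get('command')`):

```python
def resolve_run_command(options, service_command):
    if options['COMMAND'] is not None:
        return [options['COMMAND']] + options['ARGS']
    if options['--entrypoint'] is not None:
        return []  # overriding the entrypoint also drops the image's default CMD
    return service_command
```

This is the behaviour exercised by the new entrypoint/CMD integration tests at the bottom of the diff.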

View File

@ -103,3 +103,22 @@ def get_build_version():
with open(filename) as fh:
return fh.read().strip()
def is_docker_for_mac_installed():
return is_mac() and os.path.isdir('/Applications/Docker.app')
def generate_user_agent():
parts = [
"docker-compose/{}".format(compose.__version__),
"docker-py/{}".format(docker.__version__),
]
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
pass
else:
parts.append("{}/{}".format(p_system, p_release))
return " ".join(parts)
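
For illustration, the generated header is a space-separated list of component/version tokens; the actual values depend on the installed versions and platform:

```python
from compose.cli.utils import generate_user_agent

print(generate_user_agent())
# e.g. "docker-compose/1.8.0-rc3 docker-py/1.9.0 Linux/4.4.0"
# (the platform token is omitted if platform.system()/platform.release() raise IOError)
```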

View File

@ -1,11 +1,10 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
DEFAULT_TIMEOUT = 10
HTTP_TIMEOUT = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
HTTP_TIMEOUT = 60
IMAGE_EVENTS = ['delete', 'import', 'pull', 'push', 'tag', 'untag']
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'

compose/errors.py (new file, +7 lines)
View File

@ -0,0 +1,7 @@
from __future__ import absolute_import
from __future__ import unicode_literals
class OperationFailedError(Exception):
def __init__(self, reason):
self.msg = reason

View File

@ -12,6 +12,7 @@ from six.moves.queue import Empty
from six.moves.queue import Queue
from compose.cli.signals import ShutdownException
from compose.errors import OperationFailedError
from compose.utils import get_output_stream
@ -47,6 +48,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None):
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(get_name(obj), 'error')
elif isinstance(exception, OperationFailedError):
errors[get_name(obj)] = exception.msg
writer.write(get_name(obj), 'error')
elif isinstance(exception, UpstreamError):
writer.write(get_name(obj), 'error')
else:

View File

@ -369,6 +369,8 @@ class Project(object):
detached=False,
remove_orphans=False):
warn_for_swarm_mode(self.client)
self.initialize()
self.find_orphan_containers(remove_orphans)
@ -533,6 +535,20 @@ def get_volumes_from(project, service_dict):
return [build_volume_from(vf) for vf in volumes_from]
def warn_for_swarm_mode(client):
info = client.info()
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
log.warn(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use the bundle feature of the Docker experimental build.\n\n"
"More info:\n"
"https://docs.docker.com/compose/bundles\n"
)
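
The condition itself is small enough to restate as a predicate; the three cases below are the ones the new unit tests at the end of this diff cover:

```python
def should_warn_for_swarm_mode(info):
    """Condensed predicate behind warn_for_swarm_mode(); `info` is client.info()."""
    return info.get('Swarm', {}).get('LocalNodeState') == 'active'

assert should_warn_for_swarm_mode({'Swarm': {'LocalNodeState': 'active'}})
assert not should_warn_for_swarm_mode({'Swarm': {'LocalNodeState': 'inactive'}})
assert not should_warn_for_swarm_mode({})   # no swarm info at all
```
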
class NoSuchService(Exception):
def __init__(self, name):
self.name = name

View File

@ -27,6 +27,7 @@ from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_VERSION
from .container import Container
from .errors import OperationFailedError
from .parallel import parallel_execute
from .parallel import parallel_start
from .progress_stream import stream_output
@ -277,7 +278,11 @@ class Service(object):
if 'name' in container_options and not quiet:
log.info("Creating %s" % container_options['name'])
return Container.create(self.client, **container_options)
try:
return Container.create(self.client, **container_options)
except APIError as ex:
raise OperationFailedError("Cannot create container for service %s: %s" %
(self.name, ex.explanation))
def ensure_image_exists(self, do_build=BuildAction.none):
if self.can_be_built() and do_build == BuildAction.force:
@ -447,7 +452,10 @@ class Service(object):
def start_container(self, container):
self.connect_container_to_networks(container)
container.start()
try:
container.start()
except APIError as ex:
raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
return container
def connect_container_to_networks(self, container):
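
Taken together with the new `compose/errors.py` module and the `parallel.py` change above, the error path is: a failed engine call is wrapped into `OperationFailedError`, and both `parallel_execute()` and `main()` only read its `.msg`. A condensed sketch of the create side (simplified from `Service.create_container()` above):

```python
from docker.errors import APIError

from compose.container import Container
from compose.errors import OperationFailedError


def create_container(service, **container_options):
    try:
        return Container.create(service.client, **container_options)
    except APIError as ex:
        # parallel_execute() records .msg against the container name, so the user sees e.g.
        # "ERROR: for composetest_web_2 Cannot create container for service web: Boom"
        # instead of an unrelated stack trace.
        raise OperationFailedError(
            "Cannot create container for service %s: %s" % (service.name, ex.explanation))
```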

View File

@ -117,7 +117,7 @@ _docker_compose_bundle() {
;;
esac
COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--fetch-digests --help --output -o" -- "$cur" ) )
}

docs/bundles.md (new file, +200 lines)
View File

@ -0,0 +1,200 @@
<!--[metadata]>
+++
title = "Docker Stacks and Distributed Application Bundles"
description = "Description of Docker and Compose's experimental support for application bundles"
keywords = ["documentation, docs, docker, compose, bundles, stacks"]
advisory = "experimental"
[menu.main]
parent="workw_compose"
+++
<![end-metadata]-->
# Docker Stacks and Distributed Application Bundles (experimental)
> **Note**: This is a copy of the [Docker Stacks and Distributed Application
> Bundles](https://github.com/docker/docker/blob/v1.12.0-rc4/experimental/docker-stacks-and-bundles.md)
> document in the [docker/docker repo](https://github.com/docker/docker).
## Overview
Docker Stacks and Distributed Application Bundles are experimental features
introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of
swarm mode, and Nodes and Services in the Engine API.
A Dockerfile can be built into an image, and containers can be created from
that image. Similarly, a docker-compose.yml can be built into a **distributed
application bundle**, and **stacks** can be created from that bundle. In that
sense, the bundle is a multi-service distributable image format.
As of Docker 1.12 and Compose 1.8, the features are experimental. Neither
Docker Engine nor the Docker Registry supports distribution of bundles.
## Producing a bundle
The easiest way to produce a bundle is to generate it using `docker-compose`
from an existing `docker-compose.yml`. Of course, that's just *one* possible way
to proceed, in the same way that `docker build` isn't the only way to produce a
Docker image.
From `docker-compose`:
```bash
$ docker-compose bundle
WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring
WARNING: Unsupported key 'links' in services.nsqd - ignoring
WARNING: Unsupported key 'volumes' in services.nsqd - ignoring
[...]
Wrote bundle to vossibility-stack.dab
```
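
Under the hood this is a thin wrapper over the functions changed in `compose/bundle.py` earlier in this diff. A rough sketch of the same flow from Python (how `project` and `config` are obtained is left out, and the `MissingDigests` import location is an assumption):

```python
from compose.bundle import MissingDigests, get_image_digests, serialize_bundle


def write_bundle(project, config, output_path, push_images=False):
    # Roughly what TopLevelCommand.bundle() does; `project` and `config` are the objects
    # cli/main.py builds via project_from_options()/get_config_from_options().
    try:
        digests = get_image_digests(project, allow_push=push_images)
    except MissingDigests as e:
        raise SystemExit(
            "Missing digests: push %s, pull %s (or pass push_images=True)"
            % (sorted(e.needs_push), sorted(e.needs_pull)))

    with open(output_path, 'w') as fh:
        fh.write(serialize_bundle(config, digests))
```
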
## Creating a stack from a bundle
A stack is created using the `docker deploy` command:
```bash
# docker deploy --help
Usage: docker deploy [OPTIONS] STACK
Create and update a stack
Options:
--file string Path to a Distributed Application Bundle file (Default: STACK.dab)
--help Print usage
--with-registry-auth Send registry authentication details to Swarm agents
```
Let's deploy the stack created before:
```bash
# docker deploy vossibility-stack
Loading bundle from vossibility-stack.dab
Creating service vossibility-stack_elasticsearch
Creating service vossibility-stack_kibana
Creating service vossibility-stack_logstash
Creating service vossibility-stack_lookupd
Creating service vossibility-stack_nsqd
Creating service vossibility-stack_vossibility-collector
```
We can verify that services were correctly created:
```bash
# docker service ls
ID NAME REPLICAS IMAGE COMMAND
29bv0vnlm903 vossibility-stack_lookupd 1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqlookupd
4awt47624qwh vossibility-stack_nsqd 1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqd --data-path=/data --lookupd-tcp-address=lookupd:4160
4tjx9biia6fs vossibility-stack_elasticsearch 1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
7563uuzr9eys vossibility-stack_kibana 1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
9gc5m4met4he vossibility-stack_logstash 1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe logstash -f /etc/logstash/conf.d/logstash.conf
axqh55ipl40h vossibility-stack_vossibility-collector 1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba --config /config/config.toml --debug
```
## Managing stacks
Stacks are managed using the `docker stack` command:
```bash
# docker stack --help
Usage: docker stack COMMAND
Manage Docker stacks
Options:
--help Print usage
Commands:
config Print the stack configuration
deploy Create and update a stack
rm Remove the stack
services List the services in the stack
tasks List the tasks in the stack
Run 'docker stack COMMAND --help' for more information on a command.
```
## Bundle file format
Distributed application bundles are described in a JSON format. When bundles
are persisted as files, the file extension is `.dab`.
A bundle has two top-level fields: `version` and `services`. The version used
by Docker 1.12 tools is `0.1`.
`services` in the bundle are the services that comprise the app. They
correspond to the new `Service` object introduced in the 1.12 Docker Engine API.
A service has the following fields:
<dl>
<dt>
Image (required) <code>string</code>
</dt>
<dd>
The image that the service will run. Docker images should be referenced
with full content hash to fully specify the deployment artifact for the
service. Example:
<code>postgres@sha256:e0a230a9f5b4e1b8b03bb3e8cf7322b0e42b7838c5c87f4545edb48f5eb8f077</code>
</dd>
<dt>
Command <code>[]string</code>
</dt>
<dd>
Command to run in service containers.
</dd>
<dt>
Args <code>[]string</code>
</dt>
<dd>
Arguments passed to the service containers.
</dd>
<dt>
Env <code>[]string</code>
</dt>
<dd>
Environment variables.
</dd>
<dt>
Labels <code>map[string]string</code>
</dt>
<dd>
Labels used for setting metadata on services.
</dd>
<dt>
Ports <code>[]Port</code>
</dt>
<dd>
Service ports (composed of <code>Port</code> (<code>int</code>) and
<code>Protocol</code> (<code>string</code>)). A service description can
only specify the container port to be exposed. These ports can be
mapped on runtime hosts at the operator's discretion.
</dd>
<dt>
WorkingDir <code>string</code>
</dt>
<dd>
Working directory inside the service containers.
</dd>
<dt>
User <code>string</code>
</dt>
<dd>
Username or UID (format: <code>&lt;name|uid&gt;[:&lt;group|gid&gt;]</code>).
</dd>
<dt>
Networks <code>[]string</code>
</dt>
<dd>
Networks that the service containers should be connected to. An entity
deploying a bundle should create networks as needed.
</dd>
</dl>
> **Note:** Some configuration options are not yet supported in the DAB format,
> including volume mounts.
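
To make the schema concrete, here is a minimal single-service bundle assembled and serialized from Python. The digest and values are placeholders, and the key capitalization follows the field list above; treat the exact casing as illustrative:

```python
import json

bundle = {
    "Version": "0.1",
    "Services": {
        "web": {
            "Image": "nginx@sha256:0000000000000000000000000000000000000000000000000000000000000000",
            "Env": ["RACK_ENV=production"],
            "Ports": [{"Port": 80, "Protocol": "tcp"}],
            "Networks": ["default"],
        },
    },
}

with open("example.dab", "w") as fh:
    json.dump(bundle, fh, indent=2, sort_keys=True)
```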

View File

@ -115,22 +115,41 @@ specified.
> [Version 2 file format](#version-2) only.
Add build arguments. You can use either an array or a dictionary. Any
boolean values; true, false, yes, no, need to be enclosed in quotes to ensure
they are not converted to True or False by the YML parser.
Add build arguments, which are environment variables accessible only during the
build process.
Build arguments with only a key are resolved to their environment value on the
machine Compose is running on.
First, specify the arguments in your Dockerfile:
    ARG buildno
    ARG password

    RUN echo "Build number: $buildno"
    RUN script-requiring-password.sh "$password"

Then specify the arguments under the `build` key. You can pass either a mapping
or a list:

    build:
      context: .
      args:
        buildno: 1
        user: someuser
        password: secret

    build:
      context: .
      args:
        - buildno=1
        - user=someuser
        - password=secret

You can omit the value when specifying a build argument, in which case its value
at build time is the value in the environment where Compose is running.

    args:
      - buildno
      - password
> **Note**: YAML boolean values (`true`, `false`, `yes`, `no`, `on`, `off`) must
> be enclosed in quotes, so that the parser interprets them as strings.
### cap_add, cap_drop
@ -274,6 +293,11 @@ beginning with `#` (i.e. comments) are ignored, as are blank lines.
# Set Rails/Rack environment
RACK_ENV=development
> **Note:** If your service specifies a [build](#build) option, variables
> defined in environment files will _not_ be automatically visible during the
> build. Use the [args](#args) sub-option of `build` to define build-time
> environment variables.
### environment
Add environment variables. You can use either an array or a dictionary. Any
@ -293,6 +317,11 @@ machine Compose is running on, which can be helpful for secret or host-specific
- SHOW=true
- SESSION_SECRET
> **Note:** If your service specifies a [build](#build) option, variables
> defined in `environment` will _not_ be automatically visible during the
> build. Use the [args](#args) sub-option of `build` to define build-time
> environment variables.
### expose
Expose ports without publishing them to the host machine - they'll only be

View File

@ -39,7 +39,7 @@ which the release page specifies, in your terminal.
The following is an example command illustrating the format:
curl -L https://github.com/docker/compose/releases/download/1.8.0-rc2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
curl -L https://github.com/docker/compose/releases/download/1.8.0-rc3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
If you have problems installing with `curl`, see
[Alternative Install Options](#alternative-install-options).
@ -54,7 +54,7 @@ which the release page specifies, in your terminal.
7. Test the installation.
$ docker-compose --version
docker-compose version: 1.8.0-rc2
docker-compose version: 1.8.0-rc3
## Alternative install options
@ -77,7 +77,7 @@ to get started.
Compose can also be run inside a container, from a small bash script wrapper.
To install compose as a container run:
$ curl -L https://github.com/docker/compose/releases/download/1.8.0-rc2/run.sh > /usr/local/bin/docker-compose
$ curl -L https://github.com/docker/compose/releases/download/1.8.0-rc3/run.sh > /usr/local/bin/docker-compose
$ chmod +x /usr/local/bin/docker-compose
## Master builds

docs/reference/bundle.md (new file, +31 lines)
View File

@ -0,0 +1,31 @@
<!--[metadata]>
+++
title = "bundle"
description = "Create a distributed application bundle from the Compose file."
keywords = ["fig, composition, compose, docker, orchestration, cli, bundle"]
[menu.main]
identifier="bundle.compose"
parent = "smn_compose_cli"
+++
<![end-metadata]-->
# bundle
```
Usage: bundle [options]
Options:
--push-images Automatically push images for any services
which have a `build` option specified.
-o, --output PATH Path to write the bundle file to.
Defaults to "<project name>.dab".
```
Generate a Distributed Application Bundle (DAB) from the Compose file.
Images must have digests stored, which requires interaction with a
Docker registry. If digests aren't stored for all images, you can fetch
them with `docker-compose pull` or `docker-compose push`. To push images
automatically when bundling, pass `--push-images`. Only services with
a `build` option specified will have their images pushed.

docs/reference/push.md (new file, +21 lines)
View File

@ -0,0 +1,21 @@
<!--[metadata]>
+++
title = "push"
description = "Pushes service images."
keywords = ["fig, composition, compose, docker, orchestration, cli, push"]
[menu.main]
identifier="push.compose"
parent = "smn_compose_cli"
+++
<![end-metadata]-->
# push
```
Usage: push [options] [SERVICE...]
Options:
--ignore-push-failures  Push what it can and ignore images with push failures.
```
Pushes images for services.

View File

@ -1,7 +1,7 @@
PyYAML==3.11
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
cached-property==1.2.0
docker-py==1.9.0rc2
docker-py==1.9.0
dockerpty==0.4.1
docopt==0.6.1
enum34==1.0.4; python_version < '3.4'

View File

@ -15,7 +15,7 @@
set -e
VERSION="1.8.0-rc2"
VERSION="1.8.0-rc3"
IMAGE="docker/compose:$VERSION"

View File

@ -34,7 +34,7 @@ install_requires = [
'requests >= 2.6.1, < 2.8',
'texttable >= 0.8.1, < 0.9',
'websocket-client >= 0.32.0, < 1.0',
'docker-py == 1.9.0rc2',
'docker-py >= 1.9.0, < 2.0',
'dockerpty >= 0.4.1, < 0.5',
'six >= 1.3.0, < 2',
'jsonschema >= 2.5.1, < 3',

View File

@ -4,7 +4,6 @@ from __future__ import unicode_literals
import datetime
import json
import os
import shlex
import signal
import subprocess
import time
@ -965,16 +964,54 @@ class CLITestCase(DockerClientTestCase):
[u'/bin/true'],
)
def test_run_service_with_entrypoint_overridden(self):
self.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
name = 'service'
self.dispatch(['run', '--entrypoint', '/bin/echo', name, 'helloworld'])
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
self.assertEqual(
shlex.split(container.human_readable_command),
[u'/bin/echo', u'helloworld'],
)
def test_run_service_with_dockerfile_entrypoint(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['printf']
assert container.get('Config.Cmd') == ['default', 'args']
def test_run_service_with_dockerfile_entrypoint_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert not container.get('Config.Cmd')
def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert container.get('Config.Cmd') == ['foo']
def test_run_service_with_compose_file_entrypoint(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['printf']
assert container.get('Config.Cmd') == ['default', 'args']
def test_run_service_with_compose_file_entrypoint_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert not container.get('Config.Cmd')
def test_run_service_with_compose_file_entrypoint_and_command_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert container.get('Config.Cmd') == ['foo']
def test_run_service_with_compose_file_entrypoint_and_empty_string_command(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', '--entrypoint', 'echo', 'test', ''])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert container.get('Config.Cmd') == ['']
def test_run_service_with_user_overridden(self):
self.base_dir = 'tests/fixtures/user-composefile'

View File

@ -1,2 +0,0 @@
service:
  build: .

View File

@ -0,0 +1,6 @@
version: "2"
services:
  test:
    image: busybox
    entrypoint: printf
    command: default args

View File

@ -1,3 +1,4 @@
FROM busybox:latest
LABEL com.docker.compose.test_image=true
ENTRYPOINT echo "From prebuilt entrypoint"
ENTRYPOINT ["printf"]
CMD ["default", "args"]

View File

@ -0,0 +1,4 @@
version: "2"
services:
  test:
    build: .

View File

@ -738,7 +738,10 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.assertIn("ERROR: for composetest_web_2 Boom", mock_stderr.getvalue())
self.assertIn(
"ERROR: for composetest_web_2 Cannot create container for service web: Boom",
mock_stderr.getvalue()
)
def test_scale_with_unexpected_exception(self):
"""Test that when scaling if the API returns an error, that is not of type

View File

@ -41,44 +41,30 @@ def test_get_image_digest_no_image(mock_service):
assert "doesn't define an image tag" in exc.exconly()
def test_fetch_image_digest_for_image_with_saved_digest(mock_service):
mock_service.options['image'] = image_id = 'abcd'
mock_service.pull.return_value = expected = 'sha256:thedigest'
mock_service.image.return_value = {'RepoDigests': ['digest1']}
digest = bundle.fetch_image_digest(mock_service)
assert digest == image_id + '@' + expected
mock_service.pull.assert_called_once_with()
assert not mock_service.push.called
assert not mock_service.client.pull.called
def test_fetch_image_digest_for_image(mock_service):
mock_service.options['image'] = image_id = 'abcd'
mock_service.pull.return_value = expected = 'sha256:thedigest'
mock_service.image.return_value = {'RepoDigests': []}
digest = bundle.fetch_image_digest(mock_service)
assert digest == image_id + '@' + expected
mock_service.pull.assert_called_once_with()
assert not mock_service.push.called
mock_service.client.pull.assert_called_once_with(digest)
def test_fetch_image_digest_for_build(mock_service):
def test_push_image_with_saved_digest(mock_service):
mock_service.options['build'] = '.'
mock_service.options['image'] = image_id = 'abcd'
mock_service.push.return_value = expected = 'sha256:thedigest'
mock_service.image.return_value = {'RepoDigests': ['digest1']}
digest = bundle.fetch_image_digest(mock_service)
digest = bundle.push_image(mock_service)
assert digest == image_id + '@' + expected
mock_service.push.assert_called_once_with()
assert not mock_service.pull.called
assert not mock_service.client.pull.called
assert not mock_service.client.push.called
def test_push_image(mock_service):
mock_service.options['build'] = '.'
mock_service.options['image'] = image_id = 'abcd'
mock_service.push.return_value = expected = 'sha256:thedigest'
mock_service.image.return_value = {'RepoDigests': []}
digest = bundle.push_image(mock_service)
assert digest == image_id + '@' + expected
mock_service.push.assert_called_once_with()
mock_service.client.pull.assert_called_once_with(digest)
def test_to_bundle():

View File

@ -2,10 +2,13 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import os
import platform
import docker
import pytest
import compose
from compose.cli import errors
from compose.cli.docker_client import docker_client
from compose.cli.docker_client import tls_config_from_options
from tests import mock
@ -19,11 +22,35 @@ class DockerClientTestCase(unittest.TestCase):
del os.environ['HOME']
docker_client(os.environ)
@mock.patch.dict(os.environ)
def test_docker_client_with_custom_timeout(self):
timeout = 300
with mock.patch('compose.cli.docker_client.HTTP_TIMEOUT', 300):
client = docker_client(os.environ)
self.assertEqual(client.timeout, int(timeout))
os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
client = docker_client(os.environ)
assert client.timeout == 123
@mock.patch.dict(os.environ)
def test_custom_timeout_error(self):
os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
client = docker_client(os.environ)
with mock.patch('compose.cli.errors.log') as fake_log:
with pytest.raises(errors.ConnectionError):
with errors.handle_connection_errors(client):
raise errors.RequestsConnectionError(
errors.ReadTimeoutError(None, None, None))
assert fake_log.error.call_count == 1
assert '123' in fake_log.error.call_args[0][0]
def test_user_agent(self):
client = docker_client(os.environ)
expected = "docker-compose/{0} docker-py/{1} {2}/{3}".format(
compose.__version__,
docker.__version__,
platform.system(),
platform.release()
)
self.assertEqual(client.headers['User-Agent'], expected)
class TLSConfigTestCase(unittest.TestCase):

View File

@ -510,3 +510,35 @@ class ProjectTest(unittest.TestCase):
project.down(ImageType.all, True)
self.mock_client.remove_image.assert_called_once_with("busybox:latest")
def test_warning_in_swarm_mode(self):
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
project = Project('composetest', [], self.mock_client)
with mock.patch('compose.project.log') as fake_log:
project.up()
assert fake_log.warn.call_count == 1
def test_no_warning_on_stop(self):
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
project = Project('composetest', [], self.mock_client)
with mock.patch('compose.project.log') as fake_log:
project.stop()
assert fake_log.warn.call_count == 0
def test_no_warning_in_normal_mode(self):
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'inactive'}}
project = Project('composetest', [], self.mock_client)
with mock.patch('compose.project.log') as fake_log:
project.up()
assert fake_log.warn.call_count == 0
def test_no_warning_with_no_swarm_info(self):
self.mock_client.info.return_value = {}
project = Project('composetest', [], self.mock_client)
with mock.patch('compose.project.log') as fake_log:
project.up()
assert fake_log.warn.call_count == 0