Merge pull request #4750 from docker/bump-1.13.0-rc1

Bump 1.13.0 rc1
Joffrey F 2017-04-24 11:13:19 -07:00 committed by GitHub
commit f4328ccf6d
26 changed files with 876 additions and 157 deletions

@@ -1,6 +1,53 @@
Change log
==========

1.13.0 (2017-05-01)
-------------------
### Breaking changes
- `docker-compose up` now resets a service's scaling to its default value.
You can use the newly introduced `--scale` option to specify a custom
scale value
### New features
#### Compose file version 2.2
- Introduced version 2.2 of the `docker-compose.yml` specification. This
  version requires Docker Engine 1.13.0 or above
- Added support for `init` in service definitions.
- Added support for `scale` in service definitions. The configuration's value
can be overridden using the `--scale` flag in `docker-compose up`.
Please note that the `scale` command is disabled for this file format
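
  As a rough sketch, a v2.2 file using the new options could look like the
  snippet below. The service name, image, command, and `scale` value mirror the
  scale test fixture added in this commit; `init: true` is added purely for
  illustration.

  ```yaml
  version: '2.2'
  services:
    web:
      image: busybox
      command: top
      init: true    # run an init process inside the container
      scale: 2      # default number of containers created by `up`
  ```

  Running `docker-compose up -d --scale web=3` would then override the
  configured default of 2 for that invocation, as exercised by the new
  acceptance tests.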
#### Compose file version 2.x
- Added support for `options` in the `ipam` section of network definitions
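
  As a sketch, IPAM driver options can now be declared on a network; the option
  key and value below are taken from the new integration test and are purely
  illustrative.

  ```yaml
  networks:
    front:
      driver: bridge
      ipam:
        driver: default
        options:
          com.docker.compose.network.test: "9-29-045"
  ```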
### Bugfixes
- Fixed a bug where paths provided to Compose via the `-f` option were not
  being resolved properly
- Fixed a bug where the `ext_ip::target_port` notation in the ports section
was incorrectly marked as invalid
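
  For example, a mapping that binds a host IP but lets Docker pick the
  published port, mirroring the value used in the new unit test:

  ```yaml
  services:
    web:
      image: busybox
      ports:
        - "1.1.1.1::3000"   # ext_ip::target_port
  ```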
- Fixed an issue where the `exec` command would sometimes not return control
to the terminal when using the `-d` flag
- Fixed a bug where secrets were missing from the output of the `config`
command for v3.2 files
- Fixed an issue where `docker-compose` would hang if no internet connection
was available
- Fixed an issue where paths containing unicode characters passed via the `-f`
flag were causing Compose to crash

1.12.0 (2017-04-04)
-------------------

@@ -8,7 +55,7 @@ Change log
#### Compose file version 3.2
-- Introduced version 3.2 of the `docker-compose.yml` specification.
+- Introduced version 3.2 of the `docker-compose.yml` specification
- Added support for `cache_from` in the `build` section of services

@@ -1,4 +1,4 @@
from __future__ import absolute_import
from __future__ import unicode_literals

-__version__ = '1.12.0'
+__version__ = '1.13.0-rc1'

@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import os
import subprocess
import sys

@@ -12,8 +13,12 @@ try:
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
env = os.environ.copy()
env[str('PIP_DISABLE_PIP_VERSION_CHECK')] = str('1')
s_cmd = subprocess.Popen(
-['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
+['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE,
+env=env
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(

@@ -49,14 +49,17 @@ def get_config_from_options(base_dir, options):
def get_config_path_from_options(base_dir, options, environment):
def unicode_paths(paths):
return [p.decode('utf-8') if isinstance(p, six.binary_type) else p for p in paths]

file_option = options.get('--file')
if file_option:
-return file_option
+return unicode_paths(file_option)

config_files = environment.get('COMPOSE_FILE')
if config_files:
pathsep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
-return config_files.split(pathsep)
+return unicode_paths(config_files.split(pathsep))
return None

@@ -26,6 +26,7 @@ from ..config import resolve_build_args
from ..config.environment import Environment
from ..config.serialize import serialize_config
from ..config.types import VolumeSpec
from ..const import COMPOSEFILE_V2_2 as V2_2
from ..const import IS_WINDOWS_PLATFORM
from ..errors import StreamParseError
from ..progress_stream import StreamOutputError

@@ -439,7 +440,7 @@ class TopLevelCommand(object):
exec_id = container.create_exec(command, **create_exec_options)
if detach:
-container.start_exec(exec_id, tty=tty)
+container.start_exec(exec_id, tty=tty, stream=True)
return
signals.set_signal_handler_to_shutdown()

@@ -771,15 +772,13 @@ class TopLevelCommand(object):
"""
timeout = timeout_from_opts(options)
-for s in options['SERVICE=NUM']:
-if '=' not in s:
-raise UserError('Arguments to scale should be in the form service=num')
-service_name, num = s.split('=', 1)
-try:
-num = int(num)
-except ValueError:
-raise UserError('Number of containers for service "%s" is not a '
-'number' % service_name)
+if self.project.config_version == V2_2:
+raise UserError(
+'The scale command is incompatible with the v2.2 format. '
+'Use the up command with the --scale flag instead.'
+)
+for service_name, num in parse_scale_args(options['SERVICE=NUM']).items():
self.project.get_service(service_name).scale(num, timeout=timeout)
def start(self, options):

@@ -875,7 +874,7 @@ class TopLevelCommand(object):
If you want to force Compose to stop and recreate all containers, use the
`--force-recreate` flag.
-Usage: up [options] [SERVICE...]
+Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]
Options:
-d Detached mode: Run containers in the background,

@@ -898,7 +897,9 @@ class TopLevelCommand(object):
--remove-orphans Remove containers for services not
defined in the Compose file
--exit-code-from SERVICE Return the exit code of the selected service container.
-Requires --abort-on-container-exit.
+Implies --abort-on-container-exit.
+--scale SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale`
+setting in the Compose file if present.
"""
start_deps = not options['--no-deps']
exit_value_from = exitval_from_opts(options, self.project)

@@ -919,7 +920,9 @@ class TopLevelCommand(object):
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
-remove_orphans=remove_orphans)
+remove_orphans=remove_orphans,
+scale_override=parse_scale_args(options['--scale']),
+)
if detached:
return

@@ -1238,3 +1241,19 @@ def call_docker(args):
log.debug(" ".join(map(pipes.quote, args)))
return subprocess.call(args)
def parse_scale_args(options):
res = {}
for s in options:
if '=' not in s:
raise UserError('Arguments to scale should be in the form service=num')
service_name, num = s.split('=', 1)
try:
num = int(num)
except ValueError:
raise UserError(
'Number of containers for service "%s" is not a number' % service_name
)
res[service_name] = num
return res

@@ -108,6 +108,7 @@ ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
'log_opt',
'logging',
'network_mode',
'init',
]
DOCKER_VALID_URL_PREFIXES = (

@@ -234,10 +235,10 @@ class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name conf
config)
-def find(base_dir, filenames, environment, override_dir='.'):
+def find(base_dir, filenames, environment, override_dir=None):
if filenames == ['-']:
return ConfigDetails(
-os.path.abspath(override_dir),
+os.path.abspath(override_dir) if override_dir else os.getcwd(),
[ConfigFile(None, yaml.safe_load(sys.stdin))],
environment
)

@@ -249,7 +250,7 @@ def find(base_dir, filenames, environment, override_dir='.'):
log.debug("Using configuration files: {}".format(",".join(filenames)))
return ConfigDetails(
-override_dir or os.path.dirname(filenames[0]),
+override_dir if override_dir else os.path.dirname(filenames[0]),
[ConfigFile.from_filename(f) for f in filenames],
environment
)

@@ -253,6 +253,13 @@
"driver": {"type": "string"},
"config": {
"type": "array"
},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false

@@ -298,6 +298,13 @@
"driver": {"type": "string"},
"config": {
"type": "array"
},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": "string"}
},
"additionalProperties": false
}
}
},
"additionalProperties": false

@@ -0,0 +1,387 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v2.2.json",
"type": "object",
"properties": {
"version": {
"type": "string"
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
}
},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
}
]
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"container_name": {"type": "string"},
"cpu_shares": {"type": ["number", "string"]},
"cpu_quota": {"type": ["number", "string"]},
"cpuset": {"type": "string"},
"depends_on": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"additionalProperties": false,
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"type": "object",
"additionalProperties": false,
"properties": {
"condition": {
"type": "string",
"enum": ["service_started", "service_healthy"]
}
},
"required": ["condition"]
}
}
}
]
},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns_opt": {
"type": "array",
"items": {
"type": "string"
},
"uniqueItems": true
},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"extends": {
"oneOf": [
{
"type": "string"
},
{
"type": "object",
"properties": {
"service": {"type": "string"},
"file": {"type": "string"}
},
"required": ["service"],
"additionalProperties": false
}
]
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": ["boolean", "string"]},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {"type": "object"}
},
"additionalProperties": false
},
"mac_address": {"type": "string"},
"mem_limit": {"type": ["number", "string"]},
"mem_reservation": {"type": ["string", "integer"]},
"mem_swappiness": {"type": "integer"},
"memswap_limit": {"type": ["number", "string"]},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"},
"link_local_ips": {"$ref": "#/definitions/list_of_strings"}
},
"additionalProperties": false
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
"group_add": {
"type": "array",
"items": {
"type": ["string", "number"]
},
"uniqueItems": true
},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "ports"
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"scale": {"type": "integer"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"pids_limit": {"type": ["number", "string"]},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"volume_driver": {"type": "string"},
"volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"working_dir": {"type": "string"}
},
"dependencies": {
"memswap_limit": ["mem_limit"]
},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string"}
}
},
"network": {
"id": "#/definitions/network",
"type": "object",
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array"
}
},
"additionalProperties": false
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"internal": {"type": "boolean"},
"enable_ipv6": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
},
"additionalProperties": false
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}

@@ -7,8 +7,9 @@ import yaml
from compose.config import types
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_1 as V2_1
from compose.const import COMPOSEFILE_V2_2 as V2_2
from compose.const import COMPOSEFILE_V3_1 as V3_1
-from compose.const import COMPOSEFILE_V3_1 as V3_2
+from compose.const import COMPOSEFILE_V3_2 as V3_2
def serialize_config_type(dumper, data):

@@ -95,7 +96,7 @@ def denormalize_service_dict(service_dict, version, image_digest=None):
if version == V1 and 'network_mode' not in service_dict:
service_dict['network_mode'] = 'bridge'
-if 'depends_on' in service_dict and version != V2_1:
+if 'depends_on' in service_dict and version not in (V2_1, V2_2):
service_dict['depends_on'] = sorted([
svc for svc in service_dict['depends_on'].keys()
])

@@ -111,9 +112,9 @@ def denormalize_service_dict(service_dict, version, image_digest=None):
)
if 'ports' in service_dict and version not in (V3_2,):
-service_dict['ports'] = map(
-lambda p: p.legacy_repr() if isinstance(p, types.ServicePort) else p,
-service_dict['ports']
-)
+service_dict['ports'] = [
+p.legacy_repr() if isinstance(p, types.ServicePort) else p
+for p in service_dict['ports']
+]
return service_dict

@@ -267,7 +267,7 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
@classmethod
def parse(cls, spec):
if isinstance(spec, cls):
-# WHen extending a service with ports, the port definitions have already been parsed
+# When extending a service with ports, the port definitions have already been parsed
return [spec]
if not isinstance(spec, dict):

@@ -316,7 +316,7 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
def normalize_port_dict(port):
return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
published=port.get('published', ''),
-is_pub=(':' if port.get('published') else ''),
+is_pub=(':' if port.get('published') or port.get('external_ip') else ''),
target=port.get('target'),
protocol=port.get('protocol', 'tcp'),
external_ip=port.get('external_ip', ''),

@@ -21,6 +21,7 @@ SECRETS_PATH = '/run/secrets'
COMPOSEFILE_V1 = '1'
COMPOSEFILE_V2_0 = '2.0'
COMPOSEFILE_V2_1 = '2.1'
COMPOSEFILE_V2_2 = '2.2'
COMPOSEFILE_V3_0 = '3.0'
COMPOSEFILE_V3_1 = '3.1'

@@ -30,6 +31,7 @@ API_VERSIONS = {
COMPOSEFILE_V1: '1.21',
COMPOSEFILE_V2_0: '1.22',
COMPOSEFILE_V2_1: '1.24',
COMPOSEFILE_V2_2: '1.25',
COMPOSEFILE_V3_0: '1.25',
COMPOSEFILE_V3_1: '1.25',
COMPOSEFILE_V3_2: '1.25',

@@ -39,6 +41,7 @@ API_VERSION_TO_ENGINE_VERSION = {
API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0',
API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
API_VERSIONS[COMPOSEFILE_V2_2]: '1.13.0',
API_VERSIONS[COMPOSEFILE_V3_0]: '1.13.0',
API_VERSIONS[COMPOSEFILE_V3_1]: '1.13.0',
API_VERSIONS[COMPOSEFILE_V3_2]: '1.13.0',

@@ -123,6 +123,7 @@ def create_ipam_config_from_dict(ipam_dict):
)
for config in ipam_dict.get('config', [])
],
options=ipam_dict.get('options')
)

@@ -157,6 +158,12 @@ def check_remote_ipam_config(remote, local):
if sorted(lc.get('AuxiliaryAddresses')) != sorted(rc.get('AuxiliaryAddresses')):
raise NetworkConfigChangedError(local.full_name, 'IPAM config aux_addresses')
remote_opts = remote_ipam.get('Options', {})
local_opts = local.ipam.get('options', {})
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if remote_opts.get(k) != local_opts.get(k):
raise NetworkConfigChangedError(local.full_name, 'IPAM option "{}"'.format(k))
def check_remote_network_config(remote, local):
if local.driver and remote.get('Driver') != local.driver:

@@ -260,10 +260,6 @@ def parallel_remove(containers, options):
parallel_operation(stopped_containers, 'remove', options, 'Removing')
-def parallel_start(containers, options):
-parallel_operation(containers, 'start', options, 'Starting')
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')

@@ -57,12 +57,13 @@ class Project(object):
"""
A collection of services.
"""
-def __init__(self, name, services, client, networks=None, volumes=None):
+def __init__(self, name, services, client, networks=None, volumes=None, config_version=None):
self.name = name
self.services = services
self.client = client
self.volumes = volumes or ProjectVolumes({})
self.networks = networks or ProjectNetworks({}, False)
self.config_version = config_version
def labels(self, one_off=OneOffFilter.exclude):
labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]

@@ -82,7 +83,7 @@ class Project(object):
networks,
use_networking)
volumes = ProjectVolumes.from_config(name, config_data, client)
-project = cls(name, [], client, project_networks, volumes)
+project = cls(name, [], client, project_networks, volumes, config_data.version)
for service_dict in config_data.services:
service_dict = dict(service_dict)

@@ -380,13 +381,17 @@ class Project(object):
do_build=BuildAction.none,
timeout=None,
detached=False,
-remove_orphans=False):
+remove_orphans=False,
+scale_override=None):
warn_for_swarm_mode(self.client)
self.initialize()
self.find_orphan_containers(remove_orphans)
if scale_override is None:
scale_override = {}
services = self.get_services_without_duplicate(
service_names,
include_deps=start_deps)

@@ -399,7 +404,8 @@ class Project(object):
return service.execute_convergence_plan(
plans[service.name],
timeout=timeout,
-detached=detached
+detached=detached,
+scale_override=scale_override.get(service.name)
)
def get_deps(service):

@@ -589,10 +595,13 @@ def get_secrets(service, service_secrets, secret_defs):
continue
if secret.uid or secret.gid or secret.mode:
-log.warn("Service \"{service}\" uses secret \"{secret}\" with uid, "
+log.warn(
+"Service \"{service}\" uses secret \"{secret}\" with uid, "
"gid, or mode. These fields are not supported by this "
"implementation of the Compose file".format(
-service=service, secret=secret.source))
+service=service, secret=secret.source
+)
+)
secrets.append({'secret': secret, 'file': secret_def.get('file')})

@@ -38,7 +38,6 @@ from .errors import HealthCheckFailed
from .errors import NoHealthCheckConfigured
from .errors import OperationFailedError
from .parallel import parallel_execute
-from .parallel import parallel_start
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
from .utils import json_hash

@@ -48,7 +47,7 @@ from .utils import parse_seconds_float
log = logging.getLogger(__name__)
-DOCKER_START_KEYS = [
+HOST_CONFIG_KEYS = [
'cap_add',
'cap_drop',
'cgroup_parent',

@@ -60,6 +59,7 @@ DOCKER_START_KEYS = [
'env_file',
'extra_hosts',
'group_add',
'init',
'ipc',
'read_only',
'log_driver',

@@ -147,6 +147,7 @@ class Service(object):
network_mode=None,
networks=None,
secrets=None,
scale=None,
**options
):
self.name = name

@@ -158,6 +159,7 @@ class Service(object):
self.network_mode = network_mode or NetworkMode(None)
self.networks = networks or {}
self.secrets = secrets or []
self.scale_num = scale or 1
self.options = options
def __repr__(self):
@@ -188,16 +190,7 @@ class Service(object):
self.start_container_if_stopped(c, **options) self.start_container_if_stopped(c, **options)
return containers return containers
def scale(self, desired_num, timeout=None): def show_scale_warnings(self, desired_num):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if self.custom_container_name and desired_num > 1: if self.custom_container_name and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". ' log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. ' 'Docker requires each container to have a unique name. '
@@ -209,14 +202,18 @@ class Service(object):
'for this service are created on a single host, the port will clash.' 'for this service are created on a single host, the port will clash.'
% self.name) % self.name)
def create_and_start(service, number): def scale(self, desired_num, timeout=None):
container = service.create_container(number=number, quiet=True) """
service.start_container(container) Adjusts the number of containers to the specified number and ensures
return container they are running.
def stop_and_remove(container): - creates containers until there are at least `desired_num`
container.stop(timeout=self.stop_timeout(timeout)) - stops containers until there are at most `desired_num` running
container.remove() - starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
self.show_scale_warnings(desired_num)
running_containers = self.containers(stopped=False) running_containers = self.containers(stopped=False)
num_running = len(running_containers) num_running = len(running_containers)
@@ -227,11 +224,10 @@ class Service(object):
return return
if desired_num > num_running: if desired_num > num_running:
# we need to start/create until we have desired_num
all_containers = self.containers(stopped=True) all_containers = self.containers(stopped=True)
if num_running != len(all_containers): if num_running != len(all_containers):
# we have some stopped containers, let's start them up again # we have some stopped containers, check for divergences
stopped_containers = [ stopped_containers = [
c for c in all_containers if not c.is_running c for c in all_containers if not c.is_running
] ]
@@ -240,38 +236,14 @@ class Service(object):
divergent_containers = [ divergent_containers = [
c for c in stopped_containers if self._containers_have_diverged([c]) c for c in stopped_containers if self._containers_have_diverged([c])
] ]
stopped_containers = sorted(
set(stopped_containers) - set(divergent_containers),
key=attrgetter('number')
)
for c in divergent_containers: for c in divergent_containers:
c.remove() c.remove()
num_stopped = len(stopped_containers) all_containers = list(set(all_containers) - set(divergent_containers))
if num_stopped + num_running > desired_num: sorted_containers = sorted(all_containers, key=attrgetter('number'))
num_to_start = desired_num - num_running self._execute_convergence_start(
containers_to_start = stopped_containers[:num_to_start] sorted_containers, desired_num, timeout, True, True
else:
containers_to_start = stopped_containers
parallel_start(containers_to_start, {})
num_running += len(containers_to_start)
num_to_create = desired_num - num_running
next_number = self._next_container_number()
container_numbers = [
number for number in range(
next_number, next_number + num_to_create
)
]
parallel_execute(
container_numbers,
lambda n: create_and_start(service=self, number=n),
lambda n: self.get_container_name(n),
"Creating and starting"
) )
if desired_num < num_running: if desired_num < num_running:
@@ -281,12 +253,7 @@ class Service(object):
running_containers, running_containers,
key=attrgetter('number')) key=attrgetter('number'))
parallel_execute( self._downscale(sorted_running_containers[-num_to_stop:], timeout)
sorted_running_containers[-num_to_stop:],
stop_and_remove,
lambda c: c.name,
"Stopping and removing",
)
def create_container(self, def create_container(self,
one_off=False, one_off=False,
@@ -399,50 +366,119 @@ class Service(object):
return has_diverged return has_diverged
def execute_convergence_plan(self, def _execute_convergence_create(self, scale, detached, start):
plan, i = self._next_container_number()
timeout=None,
detached=False,
start=True):
(action, containers) = plan
should_attach_logs = not detached
if action == 'create': def create_and_start(service, n):
container = self.create_container() container = service.create_container(number=n)
if not detached:
if should_attach_logs:
container.attach_log_stream() container.attach_log_stream()
if start: if start:
self.start_container(container) self.start_container(container)
return container
return [container] containers, errors = parallel_execute(
range(i, i + scale),
elif action == 'recreate': lambda n: create_and_start(self, n),
return [ lambda n: self.get_container_name(n),
self.recreate_container( "Creating"
container,
timeout=timeout,
attach_logs=should_attach_logs,
start_new_container=start
) )
for container in containers for error in errors.values():
] raise OperationFailedError(error)
elif action == 'start':
if start:
for container in containers:
self.start_container_if_stopped(container, attach_logs=should_attach_logs)
return containers return containers
elif action == 'noop': def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
if len(containers) > scale:
self._downscale(containers[scale:], timeout)
containers = containers[:scale]
def recreate(container):
return self.recreate_container(
container, timeout=timeout, attach_logs=not detached,
start_new_container=start
)
containers, errors = parallel_execute(
containers,
recreate,
lambda c: c.name,
"Recreating"
)
for error in errors.values():
raise OperationFailedError(error)
if len(containers) < scale:
containers.extend(self._execute_convergence_create(
scale - len(containers), detached, start
))
return containers
def _execute_convergence_start(self, containers, scale, timeout, detached, start):
if len(containers) > scale:
self._downscale(containers[scale:], timeout)
containers = containers[:scale]
if start:
_, errors = parallel_execute(
containers,
lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
lambda c: c.name,
"Starting"
)
for error in errors.values():
raise OperationFailedError(error)
if len(containers) < scale:
containers.extend(self._execute_convergence_create(
scale - len(containers), detached, start
))
return containers
def _downscale(self, containers, timeout=None):
def stop_and_remove(container):
container.stop(timeout=self.stop_timeout(timeout))
container.remove()
parallel_execute(
containers,
stop_and_remove,
lambda c: c.name,
"Stopping and removing",
)
def execute_convergence_plan(self, plan, timeout=None, detached=False,
start=True, scale_override=None):
(action, containers) = plan
scale = scale_override if scale_override is not None else self.scale_num
containers = sorted(containers, key=attrgetter('number'))
self.show_scale_warnings(scale)
if action == 'create':
return self._execute_convergence_create(
scale, detached, start
)
if action == 'recreate':
return self._execute_convergence_recreate(
containers, scale, timeout, detached, start
)
if action == 'start':
return self._execute_convergence_start(
containers, scale, timeout, detached, start
)
if action == 'noop':
if scale != len(containers):
return self._execute_convergence_start(
containers, scale, timeout, detached, start
)
for c in containers: for c in containers:
log.info("%s is up-to-date" % c.name) log.info("%s is up-to-date" % c.name)
return containers return containers
else:
raise Exception("Invalid action: {}".format(action)) raise Exception("Invalid action: {}".format(action))
def recreate_container( def recreate_container(
@@ -729,8 +765,8 @@
number,
self.config_hash if add_config_hash else None)
-# Delete options which are only used when starting
-for key in DOCKER_START_KEYS:
+# Delete options which are only used in HostConfig
+for key in HOST_CONFIG_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(

@@ -750,8 +786,12 @@
logging_dict = options.get('logging', None)
log_config = get_log_config(logging_dict)
init_path = None
if isinstance(options.get('init'), six.string_types):
init_path = options.get('init')
options['init'] = True
-host_config = self.client.create_host_config(
+return self.client.create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=build_port_bindings(
formatted_ports(options.get('ports', []))

@@ -786,15 +826,12 @@
oom_score_adj=options.get('oom_score_adj'),
mem_swappiness=options.get('mem_swappiness'),
group_add=options.get('group_add'),
-userns_mode=options.get('userns_mode')
+userns_mode=options.get('userns_mode'),
+init=options.get('init', None),
+init_path=init_path,
+isolation=options.get('isolation'),
)
-# TODO: Add as an argument to create_host_config once it's supported
-# in docker-py
-host_config['Isolation'] = options.get('isolation')
-return host_config
def get_secret_volumes(self):
def build_spec(secret):
target = '{}/{}'.format(

@@ -32,6 +32,11 @@ exe = EXE(pyz,
'compose/config/config_schema_v2.1.json',
'DATA'
),
(
'compose/config/config_schema_v2.2.json',
'compose/config/config_schema_v2.2.json',
'DATA'
),
(
'compose/config/config_schema_v3.0.json',
'compose/config/config_schema_v3.0.json',

@@ -15,7 +15,7 @@
set -e

-VERSION="1.12.0"
+VERSION="1.13.0-rc1"
IMAGE="docker/compose:$VERSION"

@@ -151,7 +151,7 @@ class CLITestCase(DockerClientTestCase):
def test_help(self):
self.base_dir = 'tests/fixtures/no-composefile'
result = self.dispatch(['help', 'up'], returncode=0)
-assert 'Usage: up [options] [SERVICE...]' in result.stdout
+assert 'Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]' in result.stdout
# Prevent tearDown from trying to create a project
self.base_dir = None

@@ -323,6 +323,7 @@ class CLITestCase(DockerClientTestCase):
assert yaml.load(result.stdout) == {
'version': '3.2',
'networks': {},
'secrets': {},
'volumes': {
'foobar': {
'labels': {

@@ -1865,6 +1866,59 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
def test_scale_v2_2(self):
self.base_dir = 'tests/fixtures/scale'
result = self.dispatch(['scale', 'web=1'], returncode=1)
assert 'incompatible with the v2.2 format' in result.stderr
def test_up_scale_scale_up(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
self.dispatch(['up', '-d', '--scale', 'web=3'])
assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 1
def test_up_scale_scale_down(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
self.dispatch(['up', '-d', '--scale', 'web=1'])
assert len(project.get_service('web').containers()) == 1
assert len(project.get_service('db').containers()) == 1
def test_up_scale_reset(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 3
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
def test_up_scale_to_zero(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
assert len(project.get_service('web').containers()) == 0
assert len(project.get_service('db').containers()) == 0
def test_port(self):
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['up', '-d'], None)

@@ -0,0 +1,9 @@
version: '2.2'
services:
web:
image: busybox
command: top
scale: 2
db:
image: busybox
command: top

@@ -19,6 +19,7 @@ from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_1 as V2_1
from compose.const import COMPOSEFILE_V2_2 as V2_2
from compose.const import COMPOSEFILE_V3_1 as V3_1
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE

@@ -564,12 +565,12 @@ class ProjectTest(DockerClientTestCase):
self.assertEqual(len(service.containers()), 3)
project.up()
service = project.get_service('web')
-self.assertEqual(len(service.containers()), 3)
+self.assertEqual(len(service.containers()), 1)
service.scale(1)
self.assertEqual(len(service.containers()), 1)
-project.up()
+project.up(scale_override={'web': 3})
service = project.get_service('web')
-self.assertEqual(len(service.containers()), 1)
+self.assertEqual(len(service.containers()), 3)
# does scale=0 ,makes any sense? after recreating at least 1 container is running
service.scale(0)
project.up()

@@ -681,6 +682,41 @@ class ProjectTest(DockerClientTestCase):
}],
}
@v2_only()
def test_up_with_ipam_options(self):
config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
'image': 'busybox:latest',
'networks': {'front': None},
}],
networks={
'front': {
'driver': 'bridge',
'ipam': {
'driver': 'default',
'options': {
"com.docker.compose.network.test": "9-29-045"
}
},
},
},
)
project = Project.from_config(
client=self.client,
name='composetest',
config_data=config_data,
)
project.up()
network = self.client.networks(names=['composetest_front'])[0]
assert network['IPAM']['Options'] == {
"com.docker.compose.network.test": "9-29-045"
}
@v2_only()
def test_up_with_network_static_addresses(self):
config_data = build_config(

@@ -1102,6 +1138,33 @@ class ProjectTest(DockerClientTestCase):
containers = project.containers()
self.assertEqual(len(containers), 1)
def test_project_up_config_scale(self):
config_data = build_config(
version=V2_2,
services=[{
'name': 'web',
'image': 'busybox:latest',
'command': 'top',
'scale': 3
}]
)
project = Project.from_config(
name='composetest', config_data=config_data, client=self.client
)
project.up()
assert len(project.containers()) == 3
project.up(scale_override={'web': 2})
assert len(project.containers()) == 2
project.up(scale_override={'web': 4})
assert len(project.containers()) == 4
project.stop()
project.up()
assert len(project.containers()) == 3
@v2_only()
def test_initialize_volumes(self):
vol_name = '{0:x}'.format(random.getrandbits(32))

@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import os
import shutil
import tempfile
from distutils.spawn import find_executable
from os import path

import pytest

@@ -25,6 +26,7 @@ from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION
from compose.container import Container
from compose.errors import OperationFailedError
from compose.project import OneOffFilter
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy

@@ -115,6 +117,21 @@ class ServiceTest(DockerClientTestCase):
service.start_container(container)
self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
def test_create_container_with_init_bool(self):
self.require_api_version('1.25')
service = self.create_service('db', init=True)
container = service.create_container()
service.start_container(container)
assert container.get('HostConfig.Init') is True
def test_create_container_with_init_path(self):
self.require_api_version('1.25')
docker_init_path = find_executable('docker-init')
service = self.create_service('db', init=docker_init_path)
container = service.create_container()
service.start_container(container)
assert container.get('HostConfig.InitPath') == docker_init_path
@pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit')
def test_create_container_with_pids_limit(self):
self.require_api_version('1.23')

@@ -761,15 +778,15 @@ class ServiceTest(DockerClientTestCase):
message="testing",
response={},
explanation="Boom")):
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
+with pytest.raises(OperationFailedError):
service.scale(3)
-self.assertEqual(len(service.containers()), 1)
-self.assertTrue(service.containers()[0].is_running)
-self.assertIn(
-"ERROR: for composetest_web_2 Cannot create container for service web: Boom",
-mock_stderr.getvalue()
+assert len(service.containers()) == 1
+assert service.containers()[0].is_running
+assert (
+"ERROR: for composetest_web_2 Cannot create container for service"
+" web: Boom" in mock_stderr.getvalue()
)
def test_scale_with_unexpected_exception(self):

@@ -821,6 +838,7 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('app', container_name='custom-container')
self.assertEqual(service.custom_container_name, 'custom-container')
+with pytest.raises(OperationFailedError):
service.scale(3)
captured_output = mock_log.warn.call_args[0][0]

@@ -15,7 +15,7 @@ from compose.const import API_VERSIONS
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_0 as V2_1
-from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output
from compose.service import Service

@@ -37,7 +37,7 @@ def get_links(container):
def engine_max_version():
if 'DOCKER_VERSION' not in os.environ:
-return V3_0
+return V3_2
version = os.environ['DOCKER_VERSION'].partition('-')[0]
if version_lt(version, '1.10'):
return V1

@@ -45,7 +45,7 @@ def engine_max_version():
return V2_0
if version_lt(version, '1.13'):
return V2_1
-return V3_0
+return V3_2
def build_version_required_decorator(ignored_versions):

@@ -1,9 +1,11 @@
# ~*~ encoding: utf-8 ~*~
from __future__ import absolute_import
from __future__ import unicode_literals

import os

import pytest
import six

from compose.cli.command import get_config_path_from_options
from compose.config.environment import Environment

@@ -55,3 +57,20 @@ class TestGetConfigPathFromOptions(object):
def test_no_path(self):
environment = Environment.from_env_file('.')
assert not get_config_path_from_options('.', {}, environment)
def test_unicode_path_from_options(self):
paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
opts = {'--file': paths}
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', opts, environment
) == ['就吃饭/docker-compose.yml']
@pytest.mark.skipif(six.PY3, reason='Env values in Python 3 are already Unicode')
def test_unicode_path_from_env(self):
with mock.patch.dict(os.environ):
os.environ['COMPOSE_FILE'] = b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml'
environment = Environment.from_env_file('.')
assert get_config_path_from_options(
'.', {}, environment
) == ['就吃饭/docker-compose.yml']

@@ -3837,3 +3837,15 @@ class SerializeTest(unittest.TestCase):
serialized_service = serialized_config['services']['web']
assert secret_sort(serialized_service['secrets']) == secret_sort(service_dict['secrets'])
assert 'secrets' in serialized_config
def test_serialize_ports(self):
config_dict = config.Config(version='2.0', services=[
{
'ports': [types.ServicePort('80', '8080', None, None, None)],
'image': 'alpine',
'name': 'web'
}
], volumes={}, networks={}, secrets={})
serialized_config = yaml.load(serialize_config(config_dict))
assert '8080:80/tcp' in serialized_config['services']['web']['ports']

@@ -71,6 +71,16 @@ class TestServicePort(object):
}
assert ports[0].legacy_repr() == port_def
def test_parse_ext_ip_no_published_port(self):
port_def = '1.1.1.1::3000'
ports = ServicePort.parse(port_def)
assert len(ports) == 1
assert ports[0].legacy_repr() == port_def + '/tcp'
assert ports[0].repr() == {
'target': '3000',
'external_ip': '1.1.1.1',
}
def test_parse_port_range(self):
ports = ServicePort.parse('25000-25001:4000-4001')
assert len(ports) == 2