Merge pull request #2384 from dnephin/bump-1.5.1

**WIP** Bump 1.5.1
This commit is contained in:
Daniel Nephin 2015-11-12 17:29:43 -05:00
commit fae20305ec
40 changed files with 1299 additions and 914 deletions

View File

@ -1,6 +1,59 @@
Change log
==========
1.5.1 (2015-11-12)
------------------
- Add the `--force-rm` option to `build`.
- Add the `ulimit` option for services in the Compose file.
- Fixed a bug where `up` would error with "service needs to be built" if
a service changed from using `image` to using `build`.
- Fixed a bug that would cause incorrect output of parallel operations
on some terminals.
- Fixed a bug that prevented a container from being recreated when the
mode of a `volumes_from` was changed.
- Fixed a regression in 1.5.0 where non-utf-8 unicode characters would cause
`up` or `logs` to crash.
- Fixed a regression in 1.5.0 where Compose would use a success exit status
code when a command fails due to an HTTP timeout communicating with the
docker daemon.
- Fixed a regression in 1.5.0 where `name` was being accepted as a valid
service option which would override the actual name of the service.
- When using `--x-networking` Compose no longer sets the hostname to the
container name.
- When using `--x-networking` Compose will only create the default network
if at least one container is using the network.
- When printing logs during `up` or `logs`, flush the output buffer after
each line to prevent buffering issues from hiding logs.
- Recreate a container if one of its dependencies is being created.
Previously a container was only recreated if its dependencies already
existed, but were being recreated as well.
- Add a warning when a `volume` in the Compose file is being ignored
and masked by a container volume from a previous container.
- Improve the output of `pull` when run without a tty.
- When using multiple Compose files, validate each before attempting to merge
them together. Previously invalid files would result in unhelpful errors.
- Allow dashes in keys in the `environment` service option.
- Improve validation error messages by including the filename as part of the
error message.
1.5.0 (2015-11-03)
------------------

View File

@ -1,3 +1,3 @@
from __future__ import unicode_literals
__version__ = '1.5.0'
__version__ = '1.5.1'

View File

@ -26,6 +26,7 @@ class LogPrinter(object):
generators = list(self._make_log_generators(self.monochrome, prefix_width))
for line in Multiplexer(generators).loop():
self.output.write(line)
self.output.flush()
def _make_log_generators(self, monochrome, prefix_width):
def no_color(text):

View File

@ -13,12 +13,12 @@ from requests.exceptions import ReadTimeout
from .. import __version__
from .. import legacy
from ..config import ConfigurationError
from ..config import parse_environment
from ..const import DEFAULT_TIMEOUT
from ..const import HTTP_TIMEOUT
from ..const import IS_WINDOWS_PLATFORM
from ..progress_stream import StreamOutputError
from ..project import ConfigurationError
from ..project import NoSuchService
from ..service import BuildError
from ..service import ConvergenceStrategy
@ -80,6 +80,7 @@ def main():
"If you encounter this issue regularly because of slow network conditions, consider setting "
"COMPOSE_HTTP_TIMEOUT to a higher value (current value: %s)." % HTTP_TIMEOUT
)
sys.exit(1)
def setup_logging():
@ -180,12 +181,15 @@ class TopLevelCommand(DocoptCommand):
Usage: build [options] [SERVICE...]
Options:
--force-rm Always remove intermediate containers.
--no-cache Do not use cache when building the image.
--pull Always attempt to pull a newer version of the image.
"""
no_cache = bool(options.get('--no-cache', False))
pull = bool(options.get('--pull', False))
project.build(service_names=options['SERVICE'], no_cache=no_cache, pull=pull)
project.build(
service_names=options['SERVICE'],
no_cache=bool(options.get('--no-cache', False)),
pull=bool(options.get('--pull', False)),
force_rm=bool(options.get('--force-rm', False)))
def help(self, project, options):
"""
@ -448,7 +452,7 @@ class TopLevelCommand(DocoptCommand):
raise e
if detach:
service.start_container(container)
container.start()
print(container.name)
else:
dockerpty.start(project.client, container.id, interactive=not options['-T'])

View File

@ -1,5 +1,4 @@
# flake8: noqa
from .config import ConfigDetails
from .config import ConfigurationError
from .config import DOCKER_CONFIG_KEYS
from .config import find

View File

@ -13,7 +13,6 @@ from .errors import ConfigurationError
from .interpolation import interpolate_environment_variables
from .validation import validate_against_fields_schema
from .validation import validate_against_service_schema
from .validation import validate_extended_service_exists
from .validation import validate_extends_file_path
from .validation import validate_top_level_object
@ -66,7 +65,6 @@ ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
'dockerfile',
'expose',
'external_links',
'name',
]
@ -99,6 +97,24 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
:type config: :class:`dict`
"""
@classmethod
def from_filename(cls, filename):
return cls(filename, load_yaml(filename))
class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):
    """A single service's configuration plus the context it was loaded from.

    Fields:
        working_dir: directory used to resolve relative paths in the config.
        filename: path of the compose file the service came from (may be None).
        name: the service's name.
        config: the raw configuration dict for the service.
    """

    @classmethod
    def with_abs_paths(cls, working_dir, filename, name, config):
        """Alternate constructor that normalizes paths to absolute form.

        Raises ValueError when no working_dir is given, since relative
        paths could not be resolved later without one.
        """
        if not working_dir:
            raise ValueError("No working_dir for ServiceConfig.")

        abs_working_dir = os.path.abspath(working_dir)
        # A missing filename (e.g. config read from stdin) is passed through.
        abs_filename = os.path.abspath(filename) if filename else filename
        return cls(abs_working_dir, abs_filename, name, config)
def find(base_dir, filenames):
if filenames == ['-']:
@ -114,7 +130,7 @@ def find(base_dir, filenames):
log.debug("Using configuration files: {}".format(",".join(filenames)))
return ConfigDetails(
os.path.dirname(filenames[0]),
[ConfigFile(f, load_yaml(f)) for f in filenames])
[ConfigFile.from_filename(f) for f in filenames])
def get_default_config_files(base_dir):
@ -174,21 +190,22 @@ def load(config_details):
"""
def build_service(filename, service_name, service_dict):
loader = ServiceLoader(
service_config = ServiceConfig.with_abs_paths(
config_details.working_dir,
filename,
service_name,
service_dict)
service_dict = loader.make_service_dict()
resolver = ServiceExtendsResolver(service_config)
service_dict = process_service(resolver.run())
validate_against_service_schema(service_dict, service_config.name)
validate_paths(service_dict)
service_dict['name'] = service_config.name
return service_dict
def load_file(filename, config):
processed_config = interpolate_environment_variables(config)
validate_against_fields_schema(processed_config)
def build_services(config_file):
return [
build_service(filename, name, service_config)
for name, service_config in processed_config.items()
build_service(config_file.filename, name, service_dict)
for name, service_dict in config_file.config.items()
]
def merge_services(base, override):
@ -200,159 +217,163 @@ def load(config_details):
for name in all_service_names
}
config_file = config_details.config_files[0]
validate_top_level_object(config_file.config)
config_file = process_config_file(config_details.config_files[0])
for next_file in config_details.config_files[1:]:
validate_top_level_object(next_file.config)
next_file = process_config_file(next_file)
config_file = ConfigFile(
config_file.filename,
merge_services(config_file.config, next_file.config))
config = merge_services(config_file.config, next_file.config)
config_file = config_file._replace(config=config)
return load_file(config_file.filename, config_file.config)
return build_services(config_file)
class ServiceLoader(object):
def __init__(self, working_dir, filename, service_name, service_dict, already_seen=None):
if working_dir is None:
raise Exception("No working_dir passed to ServiceLoader()")
def process_config_file(config_file, service_name=None):
validate_top_level_object(config_file)
processed_config = interpolate_environment_variables(config_file.config)
validate_against_fields_schema(processed_config, config_file.filename)
self.working_dir = os.path.abspath(working_dir)
if service_name and service_name not in processed_config:
raise ConfigurationError(
"Cannot extend service '{}' in {}: Service not found".format(
service_name, config_file.filename))
if filename:
self.filename = os.path.abspath(filename)
else:
self.filename = filename
return config_file._replace(config=processed_config)
class ServiceExtendsResolver(object):
def __init__(self, service_config, already_seen=None):
self.service_config = service_config
self.working_dir = service_config.working_dir
self.already_seen = already_seen or []
self.service_dict = service_dict.copy()
self.service_name = service_name
self.service_dict['name'] = service_name
def detect_cycle(self, name):
if self.signature(name) in self.already_seen:
raise CircularReference(self.already_seen + [self.signature(name)])
@property
def signature(self):
return self.service_config.filename, self.service_config.name
def make_service_dict(self):
self.resolve_environment()
if 'extends' in self.service_dict:
self.validate_and_construct_extends()
self.service_dict = self.resolve_extends()
def detect_cycle(self):
if self.signature in self.already_seen:
raise CircularReference(self.already_seen + [self.signature])
if not self.already_seen:
validate_against_service_schema(self.service_dict, self.service_name)
def run(self):
self.detect_cycle()
return process_container_options(self.service_dict, working_dir=self.working_dir)
service_dict = dict(self.service_config.config)
env = resolve_environment(self.working_dir, self.service_config.config)
if env:
service_dict['environment'] = env
service_dict.pop('env_file', None)
def resolve_environment(self):
"""
Unpack any environment variables from an env_file, if set.
Interpolate environment values if set.
"""
if 'environment' not in self.service_dict and 'env_file' not in self.service_dict:
return
if 'extends' in service_dict:
service_dict = self.resolve_extends(*self.validate_and_construct_extends())
env = {}
if 'env_file' in self.service_dict:
for f in get_env_files(self.service_dict, working_dir=self.working_dir):
env.update(env_vars_from_file(f))
del self.service_dict['env_file']
env.update(parse_environment(self.service_dict.get('environment')))
env = dict(resolve_env_var(k, v) for k, v in six.iteritems(env))
self.service_dict['environment'] = env
return self.service_config._replace(config=service_dict)
def validate_and_construct_extends(self):
extends = self.service_dict['extends']
extends = self.service_config.config['extends']
if not isinstance(extends, dict):
extends = {'service': extends}
validate_extends_file_path(
self.service_name,
extends,
self.filename
)
self.extended_config_path = self.get_extended_config_path(extends)
self.extended_service_name = extends['service']
config_path = self.get_extended_config_path(extends)
service_name = extends['service']
config = load_yaml(self.extended_config_path)
validate_top_level_object(config)
full_extended_config = interpolate_environment_variables(config)
extended_file = process_config_file(
ConfigFile.from_filename(config_path),
service_name=service_name)
service_config = extended_file.config[service_name]
return config_path, service_config, service_name
validate_extended_service_exists(
self.extended_service_name,
full_extended_config,
self.extended_config_path
)
validate_against_fields_schema(full_extended_config)
def resolve_extends(self, extended_config_path, service_dict, service_name):
resolver = ServiceExtendsResolver(
ServiceConfig.with_abs_paths(
os.path.dirname(extended_config_path),
extended_config_path,
service_name,
service_dict),
already_seen=self.already_seen + [self.signature])
self.extended_config = full_extended_config[self.extended_service_name]
def resolve_extends(self):
other_working_dir = os.path.dirname(self.extended_config_path)
other_already_seen = self.already_seen + [self.signature(self.service_name)]
other_loader = ServiceLoader(
working_dir=other_working_dir,
filename=self.extended_config_path,
service_name=self.service_name,
service_dict=self.extended_config,
already_seen=other_already_seen,
)
other_loader.detect_cycle(self.extended_service_name)
other_service_dict = other_loader.make_service_dict()
service_config = resolver.run()
other_service_dict = process_service(service_config)
validate_extended_service_dict(
other_service_dict,
filename=self.extended_config_path,
service=self.extended_service_name,
extended_config_path,
service_name,
)
return merge_service_dicts(other_service_dict, self.service_dict)
return merge_service_dicts(other_service_dict, self.service_config.config)
def get_extended_config_path(self, extends_options):
"""
Service we are extending either has a value for 'file' set, which we
"""Service we are extending either has a value for 'file' set, which we
need to obtain a full path too or we are extending from a service
defined in our own file.
"""
filename = self.service_config.filename
validate_extends_file_path(
self.service_config.name,
extends_options,
filename)
if 'file' in extends_options:
extends_from_filename = extends_options['file']
return expand_path(self.working_dir, extends_from_filename)
return expand_path(self.working_dir, extends_options['file'])
return filename
return self.filename
def signature(self, name):
return (self.filename, name)
def resolve_environment(working_dir, service_dict):
    """Build the effective environment mapping for a service.

    Variables from any `env_file` entries are loaded first, then overridden
    by the service's own `environment` option; finally each variable is
    resolved (e.g. against the host environment) via resolve_env_var.
    Returns {} when the service declares neither option.
    """
    if not ('environment' in service_dict or 'env_file' in service_dict):
        return {}

    merged = {}
    if 'env_file' in service_dict:
        # env_file entries are applied in order; later files win on conflict.
        for path in get_env_files(working_dir, service_dict):
            merged.update(env_vars_from_file(path))

    # Explicit `environment` values take precedence over env_file values.
    merged.update(parse_environment(service_dict.get('environment')))
    return dict(resolve_env_var(name, value) for name, value in six.iteritems(merged))
def validate_extended_service_dict(service_dict, filename, service):
error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
if 'links' in service_dict:
raise ConfigurationError("%s services with 'links' cannot be extended" % error_prefix)
raise ConfigurationError(
"%s services with 'links' cannot be extended" % error_prefix)
if 'volumes_from' in service_dict:
raise ConfigurationError("%s services with 'volumes_from' cannot be extended" % error_prefix)
raise ConfigurationError(
"%s services with 'volumes_from' cannot be extended" % error_prefix)
if 'net' in service_dict:
if get_service_name_from_net(service_dict['net']) is not None:
raise ConfigurationError("%s services with 'net: container' cannot be extended" % error_prefix)
raise ConfigurationError(
"%s services with 'net: container' cannot be extended" % error_prefix)
def process_container_options(service_dict, working_dir=None):
service_dict = service_dict.copy()
def validate_ulimits(ulimit_config):
    """Check that no ulimit's 'soft' value exceeds its 'hard' value.

    Integer (single-value) ulimits are accepted as-is; only dict-form
    limits with explicit soft/hard values are validated.
    Raises ConfigurationError on violation.
    """
    for _limit_name, limits in ulimit_config.items():
        if not isinstance(limits, dict):
            continue
        if limits['soft'] > limits['hard']:
            raise ConfigurationError(
                "ulimit_config \"{}\" cannot contain a 'soft' value higher "
                "than 'hard' value".format(ulimit_config))
def process_service(service_config):
working_dir = service_config.working_dir
service_dict = dict(service_config.config)
if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
service_dict['volumes'] = resolve_volume_paths(service_dict, working_dir=working_dir)
service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)
if 'build' in service_dict:
service_dict['build'] = resolve_build_path(service_dict['build'], working_dir=working_dir)
service_dict['build'] = expand_path(working_dir, service_dict['build'])
if 'labels' in service_dict:
service_dict['labels'] = parse_labels(service_dict['labels'])
if 'ulimits' in service_dict:
validate_ulimits(service_dict['ulimits'])
return service_dict
@ -424,7 +445,7 @@ def merge_environment(base, override):
return env
def get_env_files(options, working_dir=None):
def get_env_files(working_dir, options):
if 'env_file' not in options:
return {}
@ -453,7 +474,7 @@ def parse_environment(environment):
def split_env(env):
if isinstance(env, six.binary_type):
env = env.decode('utf-8')
env = env.decode('utf-8', 'replace')
if '=' in env:
return env.split('=', 1)
else:
@ -484,34 +505,25 @@ def env_vars_from_file(filename):
return env
def resolve_volume_paths(service_dict, working_dir=None):
if working_dir is None:
raise Exception("No working_dir passed to resolve_volume_paths()")
def resolve_volume_paths(working_dir, service_dict):
return [
resolve_volume_path(v, working_dir, service_dict['name'])
for v in service_dict['volumes']
resolve_volume_path(working_dir, volume)
for volume in service_dict['volumes']
]
def resolve_volume_path(volume, working_dir, service_name):
def resolve_volume_path(working_dir, volume):
container_path, host_path = split_path_mapping(volume)
if host_path is not None:
if host_path.startswith('.'):
host_path = expand_path(working_dir, host_path)
host_path = os.path.expanduser(host_path)
return "{}:{}".format(host_path, container_path)
return u"{}:{}".format(host_path, container_path)
else:
return container_path
def resolve_build_path(build_path, working_dir=None):
if working_dir is None:
raise Exception("No working_dir passed to resolve_build_path")
return expand_path(working_dir, build_path)
def validate_paths(service_dict):
if 'build' in service_dict:
build_path = service_dict['build']
@ -578,7 +590,7 @@ def parse_labels(labels):
return dict(split_label(e) for e in labels)
if isinstance(labels, dict):
return labels
return dict(labels)
def split_label(label):

View File

@ -2,15 +2,18 @@
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"id": "fields_schema.json",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
@ -40,7 +43,7 @@
{
"type": "object",
"patternProperties": {
"^[^-]+$": {
".+": {
"type": ["string", "number", "boolean", "null"],
"format": "environment"
}
@ -89,7 +92,6 @@
"mac_address": {"type": "string"},
"mem_limit": {"type": ["number", "string"]},
"memswap_limit": {"type": ["number", "string"]},
"name": {"type": "string"},
"net": {"type": "string"},
"pid": {"type": ["string", "null"]},
@ -116,6 +118,25 @@
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"stdin_open": {"type": "boolean"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"volume_driver": {"type": "string"},
@ -149,6 +170,5 @@
]
}
},
"additionalProperties": false
}
}

View File

@ -18,13 +18,6 @@ def interpolate_environment_variables(config):
def process_service(service_name, service_dict, mapping):
if not isinstance(service_dict, dict):
raise ConfigurationError(
'Service "%s" doesn\'t have any configuration options. '
'All top level keys in your docker-compose.yml must map '
'to a dictionary of configuration options.' % service_name
)
return dict(
(key, interpolate_value(service_name, key, val, mapping))
for (key, val) in service_dict.items()

View File

@ -1,21 +1,17 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "service_schema.json",
"type": "object",
"properties": {
"name": {"type": "string"}
},
"required": ["name"],
"allOf": [
{"$ref": "fields_schema.json#/definitions/service"},
{"$ref": "#/definitions/service_constraints"}
{"$ref": "#/definitions/constraints"}
],
"definitions": {
"service_constraints": {
"constraints": {
"id": "#/definitions/constraints",
"anyOf": [
{
"required": ["build"],
@ -27,13 +23,8 @@
{"required": ["build"]},
{"required": ["dockerfile"]}
]}
},
{
"required": ["extends"],
"not": {"required": ["build", "image"]}
}
]
}
}
}

View File

@ -66,21 +66,38 @@ def format_boolean_in_environment(instance):
return True
def validate_service_names(config):
for service_name in config.keys():
def validate_top_level_service_objects(config_file):
"""Perform some high level validation of the service name and value.
This validation must happen before interpolation, which must happen
before the rest of validation, which is why it's separate from the
rest of the service validation.
"""
for service_name, service_dict in config_file.config.items():
if not isinstance(service_name, six.string_types):
raise ConfigurationError(
"Service name: {} needs to be a string, eg '{}'".format(
"In file '{}' service name: {} needs to be a string, eg '{}'".format(
config_file.filename,
service_name,
service_name))
if not isinstance(service_dict, dict):
raise ConfigurationError(
"In file '{}' service '{}' doesn\'t have any configuration options. "
"All top level keys in your docker-compose.yml must map "
"to a dictionary of configuration options.".format(
config_file.filename,
service_name))
def validate_top_level_object(config):
if not isinstance(config, dict):
def validate_top_level_object(config_file):
if not isinstance(config_file.config, dict):
raise ConfigurationError(
"Top level object needs to be a dictionary. Check your .yml file "
"that you have defined a service at the top level.")
validate_service_names(config)
"Top level object in '{}' needs to be an object not '{}'. Check "
"that you have defined a service at the top level.".format(
config_file.filename,
type(config_file.config)))
validate_top_level_service_objects(config_file)
def validate_extends_file_path(service_name, extends_options, filename):
@ -96,14 +113,6 @@ def validate_extends_file_path(service_name, extends_options, filename):
)
def validate_extended_service_exists(extended_service_name, full_extended_config, extended_config_path):
if extended_service_name not in full_extended_config:
msg = (
"Cannot extend service '%s' in %s: Service not found"
) % (extended_service_name, extended_config_path)
raise ConfigurationError(msg)
def get_unsupported_config_msg(service_name, error_key):
msg = "Unsupported config option for '{}' service: '{}'".format(service_name, error_key)
if error_key in DOCKER_CONFIG_HINTS:
@ -117,189 +126,171 @@ def anglicize_validator(validator):
return 'a ' + validator
def process_errors(errors, service_name=None):
def handle_error_for_schema_with_id(error, service_name):
    """Return a tailored message for errors raised by a schema with an 'id'.

    Returns None when no special-case message applies, so the caller can
    fall back to generic error handling.
    """
    schema_id = error.schema['id']

    if schema_id == 'fields_schema.json' and error.validator == 'additionalProperties':
        # The offending service name is the key of the json object.
        return "Invalid service name '{}' - only {} characters are allowed".format(
            list(error.instance)[0],
            VALID_NAME_CHARS)

    if schema_id == '#/definitions/constraints':
        has_image = 'image' in error.instance
        has_build = 'build' in error.instance
        if has_image and has_build:
            return (
                "Service '{}' has both an image and build path specified. "
                "A service can either be built to image or use an existing "
                "image, not both.".format(service_name))
        if not has_image and not has_build:
            return (
                "Service '{}' has neither an image nor a build path "
                "specified. Exactly one must be provided.".format(service_name))
        if has_image and 'dockerfile' in error.instance:
            return (
                "Service '{}' has both an image and alternate Dockerfile. "
                "A service can either be built to image or use an existing "
                "image, not both.".format(service_name))

    if schema_id == '#/definitions/service':
        if error.validator == 'additionalProperties':
            return get_unsupported_config_msg(
                service_name, parse_key_from_error_msg(error))
def handle_generic_service_error(error, service_name):
    """Format a service-level validation error into a readable message.

    Chooses a message template based on the failing jsonschema validator;
    falls back to the raw jsonschema message when no template applies.
    """
    config_key = " ".join("'%s'" % part for part in error.path)
    template = None
    detail = error.message

    if error.validator == 'oneOf':
        template = "Service '{}' configuration key {} {}"
        detail = _parse_oneof_validator(error)
    elif error.validator == 'type':
        template = ("Service '{}' configuration key {} contains an invalid "
                    "type, it should be {}")
        detail = _parse_valid_types_from_validator(error.validator_value)
    # TODO: no test case for this branch, there are no config options
    # which exercise this branch
    elif error.validator == 'required':
        template = "Service '{}' configuration key '{}' is invalid, {}"
    elif error.validator == 'dependencies':
        template = "Service '{}' configuration key '{}' is invalid: {}"
        config_key = list(error.validator_value.keys())[0]
        required_keys = ",".join(error.validator_value[config_key])
        detail = "when defining '{}' you must set '{}' as well".format(
            config_key,
            required_keys)
    elif error.path:
        template = "Service '{}' configuration key {} value {}"

    if template:
        return template.format(service_name, config_key, detail)
    return error.message
def parse_key_from_error_msg(error):
    """Extract the first single-quoted token from a jsonschema error message.

    jsonschema's additionalProperties messages look like
    "Additional properties are not allowed ('foo' was unexpected)";
    the quoted token is the offending key.
    """
    return error.message.split("'", 2)[1]
def _parse_valid_types_from_validator(validator):
"""A validator value can be either an array of valid types or a string of
a valid type. Parse the valid types and prefix with the correct article.
"""
jsonschema gives us an error tree full of information to explain what has
if not isinstance(validator, list):
return anglicize_validator(validator)
if len(validator) == 1:
return anglicize_validator(validator[0])
return "{}, or {}".format(
", ".join([anglicize_validator(validator[0])] + validator[1:-1]),
anglicize_validator(validator[-1]))
def _parse_oneof_validator(error):
"""oneOf has multiple schemas, so we need to reason about which schema, sub
schema or constraint the validation is failing on.
Inspecting the context value of a ValidationError gives us information about
which sub schema failed and which kind of error it is.
"""
types = []
for context in error.context:
if context.validator == 'required':
return context.message
if context.validator == 'additionalProperties':
invalid_config_key = parse_key_from_error_msg(context)
return "contains unsupported option: '{}'".format(invalid_config_key)
if context.path:
invalid_config_key = " ".join(
"'{}' ".format(fragment) for fragment in context.path
if isinstance(fragment, six.string_types)
)
return "{}contains {}, which is an invalid type, it should be {}".format(
invalid_config_key,
context.instance,
_parse_valid_types_from_validator(context.validator_value))
if context.validator == 'uniqueItems':
return "contains non unique items, please remove duplicates from {}".format(
context.instance)
if context.validator == 'type':
types.append(context.validator_value)
valid_types = _parse_valid_types_from_validator(types)
return "contains an invalid type, it should be {}".format(valid_types)
def process_errors(errors, service_name=None):
"""jsonschema gives us an error tree full of information to explain what has
gone wrong. Process each error and pull out relevant information and re-write
helpful error messages that are relevant.
"""
def _parse_key_from_error_msg(error):
return error.message.split("'")[1]
def format_error_message(error, service_name):
if not service_name and error.path:
# field_schema errors will have service name on the path
service_name = error.path.popleft()
def _clean_error_message(message):
return message.replace("u'", "'")
if 'id' in error.schema:
error_msg = handle_error_for_schema_with_id(error, service_name)
if error_msg:
return error_msg
def _parse_valid_types_from_validator(validator):
"""
A validator value can be either an array of valid types or a string of
a valid type. Parse the valid types and prefix with the correct article.
"""
if isinstance(validator, list):
if len(validator) >= 2:
first_type = anglicize_validator(validator[0])
last_type = anglicize_validator(validator[-1])
types_from_validator = ", ".join([first_type] + validator[1:-1])
return handle_generic_service_error(error, service_name)
msg = "{} or {}".format(
types_from_validator,
last_type
)
else:
msg = "{}".format(anglicize_validator(validator[0]))
else:
msg = "{}".format(anglicize_validator(validator))
return msg
def _parse_oneof_validator(error):
"""
oneOf has multiple schemas, so we need to reason about which schema, sub
schema or constraint the validation is failing on.
Inspecting the context value of a ValidationError gives us information about
which sub schema failed and which kind of error it is.
"""
required = [context for context in error.context if context.validator == 'required']
if required:
return required[0].message
additionalProperties = [context for context in error.context if context.validator == 'additionalProperties']
if additionalProperties:
invalid_config_key = _parse_key_from_error_msg(additionalProperties[0])
return "contains unsupported option: '{}'".format(invalid_config_key)
constraint = [context for context in error.context if len(context.path) > 0]
if constraint:
valid_types = _parse_valid_types_from_validator(constraint[0].validator_value)
invalid_config_key = "".join(
"'{}' ".format(fragment) for fragment in constraint[0].path
if isinstance(fragment, six.string_types)
)
msg = "{}contains {}, which is an invalid type, it should be {}".format(
invalid_config_key,
constraint[0].instance,
valid_types
)
return msg
uniqueness = [context for context in error.context if context.validator == 'uniqueItems']
if uniqueness:
msg = "contains non unique items, please remove duplicates from {}".format(
uniqueness[0].instance
)
return msg
types = [context.validator_value for context in error.context if context.validator == 'type']
valid_types = _parse_valid_types_from_validator(types)
msg = "contains an invalid type, it should be {}".format(valid_types)
return msg
root_msgs = []
invalid_keys = []
required = []
type_errors = []
other_errors = []
for error in errors:
# handle root level errors
if len(error.path) == 0 and not error.instance.get('name'):
if error.validator == 'type':
msg = "Top level object needs to be a dictionary. Check your .yml file that you have defined a service at the top level."
root_msgs.append(msg)
elif error.validator == 'additionalProperties':
invalid_service_name = _parse_key_from_error_msg(error)
msg = "Invalid service name '{}' - only {} characters are allowed".format(invalid_service_name, VALID_NAME_CHARS)
root_msgs.append(msg)
else:
root_msgs.append(_clean_error_message(error.message))
else:
if not service_name:
# field_schema errors will have service name on the path
service_name = error.path[0]
error.path.popleft()
else:
# service_schema errors have the service name passed in, as that
# is not available on error.path or necessarily error.instance
service_name = service_name
if error.validator == 'additionalProperties':
invalid_config_key = _parse_key_from_error_msg(error)
invalid_keys.append(get_unsupported_config_msg(service_name, invalid_config_key))
elif error.validator == 'anyOf':
if 'image' in error.instance and 'build' in error.instance:
required.append(
"Service '{}' has both an image and build path specified. "
"A service can either be built to image or use an existing "
"image, not both.".format(service_name))
elif 'image' not in error.instance and 'build' not in error.instance:
required.append(
"Service '{}' has neither an image nor a build path "
"specified. Exactly one must be provided.".format(service_name))
elif 'image' in error.instance and 'dockerfile' in error.instance:
required.append(
"Service '{}' has both an image and alternate Dockerfile. "
"A service can either be built to image or use an existing "
"image, not both.".format(service_name))
else:
required.append(_clean_error_message(error.message))
elif error.validator == 'oneOf':
config_key = error.path[0]
msg = _parse_oneof_validator(error)
type_errors.append("Service '{}' configuration key '{}' {}".format(
service_name, config_key, msg)
)
elif error.validator == 'type':
msg = _parse_valid_types_from_validator(error.validator_value)
if len(error.path) > 0:
config_key = " ".join(["'%s'" % k for k in error.path])
type_errors.append(
"Service '{}' configuration key {} contains an invalid "
"type, it should be {}".format(
service_name,
config_key,
msg))
else:
root_msgs.append(
"Service '{}' doesn\'t have any configuration options. "
"All top level keys in your docker-compose.yml must map "
"to a dictionary of configuration options.'".format(service_name))
elif error.validator == 'required':
config_key = error.path[0]
required.append(
"Service '{}' option '{}' is invalid, {}".format(
service_name,
config_key,
_clean_error_message(error.message)))
elif error.validator == 'dependencies':
dependency_key = list(error.validator_value.keys())[0]
required_keys = ",".join(error.validator_value[dependency_key])
required.append("Invalid '{}' configuration for '{}' service: when defining '{}' you must set '{}' as well".format(
dependency_key, service_name, dependency_key, required_keys))
else:
config_key = " ".join(["'%s'" % k for k in error.path])
err_msg = "Service '{}' configuration key {} value {}".format(service_name, config_key, error.message)
other_errors.append(err_msg)
return "\n".join(root_msgs + invalid_keys + required + type_errors + other_errors)
return '\n'.join(format_error_message(error, service_name) for error in errors)
def validate_against_fields_schema(config):
schema_filename = "fields_schema.json"
format_checkers = ["ports", "environment"]
return _validate_against_schema(config, schema_filename, format_checkers)
def validate_against_fields_schema(config, filename):
    """Validate a config dict against the per-field JSON schema.

    Delegates to _validate_against_schema, which raises ConfigurationError
    (mentioning `filename` when provided) if validation fails.
    """
    schema = "fields_schema.json"
    checkers = ["ports", "environment"]
    _validate_against_schema(
        config, schema, format_checker=checkers, filename=filename)
def validate_against_service_schema(config, service_name):
schema_filename = "service_schema.json"
format_checkers = ["ports"]
return _validate_against_schema(config, schema_filename, format_checkers, service_name)
_validate_against_schema(
config,
"service_schema.json",
format_checker=["ports"],
service_name=service_name)
def _validate_against_schema(config, schema_filename, format_checker=[], service_name=None):
def _validate_against_schema(
config,
schema_filename,
format_checker=(),
service_name=None,
filename=None):
config_source_dir = os.path.dirname(os.path.abspath(__file__))
if sys.platform == "win32":
@ -315,9 +306,17 @@ def _validate_against_schema(config, schema_filename, format_checker=[], service
schema = json.load(schema_fh)
resolver = RefResolver(resolver_full_path, schema)
validation_output = Draft4Validator(schema, resolver=resolver, format_checker=FormatChecker(format_checker))
validation_output = Draft4Validator(
schema,
resolver=resolver,
format_checker=FormatChecker(format_checker))
errors = [error for error in sorted(validation_output.iter_errors(config), key=str)]
if errors:
error_msg = process_errors(errors, service_name)
raise ConfigurationError("Validation failed, reason(s):\n{}".format(error_msg))
if not errors:
return
error_msg = process_errors(errors, service_name)
file_msg = " in file '{}'".format(filename) if filename else ''
raise ConfigurationError("Validation failed{}, reason(s):\n{}".format(
file_msg,
error_msg))

View File

@ -14,26 +14,34 @@ def stream_output(output, stream):
for event in utils.json_stream(output):
all_events.append(event)
is_progress_event = 'progress' in event or 'progressDetail' in event
if 'progress' in event or 'progressDetail' in event:
image_id = event.get('id')
if not image_id:
continue
if not is_progress_event:
print_output_event(event, stream, is_terminal)
stream.flush()
continue
if image_id in lines:
diff = len(lines) - lines[image_id]
else:
lines[image_id] = len(lines)
stream.write("\n")
diff = 0
if not is_terminal:
continue
if is_terminal:
# move cursor up `diff` rows
stream.write("%c[%dA" % (27, diff))
# if it's a progress event and we have a terminal, then display the progress bars
image_id = event.get('id')
if not image_id:
continue
if image_id in lines:
diff = len(lines) - lines[image_id]
else:
lines[image_id] = len(lines)
stream.write("\n")
diff = 0
# move cursor up `diff` rows
stream.write("%c[%dA" % (27, diff))
print_output_event(event, stream, is_terminal)
if 'id' in event and is_terminal:
if 'id' in event:
# move cursor back down
stream.write("%c[%dB" % (27, diff))

View File

@ -278,10 +278,10 @@ class Project(object):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False, pull=False):
def build(self, service_names=None, no_cache=False, pull=False, force_rm=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache, pull)
service.build(no_cache, pull, force_rm)
else:
log.info('%s uses an image, skipping' % service.name)
@ -300,7 +300,7 @@ class Project(object):
plans = self._get_convergence_plans(services, strategy)
if self.use_networking:
if self.use_networking and self.uses_default_network():
self.ensure_network_exists()
return [
@ -322,7 +322,7 @@ class Project(object):
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
and plans[name].action in ('recreate', 'create')
]
if updated_dependencies and strategy.allows_recreate:
@ -383,7 +383,10 @@ class Project(object):
def remove_network(self):
network = self.get_network()
if network:
self.client.remove_network(network['id'])
self.client.remove_network(network['Id'])
def uses_default_network(self):
    """Return True if at least one service is attached to the project's
    default network (a service whose net mode matches the project name).
    """
    for service in self.services:
        if service.net.mode == self.name:
            return True
    return False
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()

View File

@ -300,9 +300,7 @@ class Service(object):
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
)
self.ensure_image_exists(do_build=do_build)
container_options = self._get_container_create_options(
override_options,
@ -316,9 +314,7 @@ class Service(object):
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True):
def ensure_image_exists(self, do_build=True):
try:
self.image()
return
@ -410,7 +406,7 @@ class Service(object):
if should_attach_logs:
container.attach_log_stream()
self.start_container(container)
container.start()
return [container]
@ -418,6 +414,7 @@ class Service(object):
return [
self.recreate_container(
container,
do_build=do_build,
timeout=timeout,
attach_logs=should_attach_logs
)
@ -439,10 +436,12 @@ class Service(object):
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
timeout=DEFAULT_TIMEOUT,
attach_logs=False):
def recreate_container(
self,
container,
do_build=False,
timeout=DEFAULT_TIMEOUT,
attach_logs=False):
"""Recreate a container.
The original container is renamed to a temporary name so that data
@ -454,28 +453,23 @@ class Service(object):
container.stop(timeout=timeout)
container.rename_to_tmp_name()
new_container = self.create_container(
do_build=False,
do_build=do_build,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
if attach_logs:
new_container.attach_log_stream()
self.start_container(new_container)
new_container.start()
container.remove()
return new_container
def start_container_if_stopped(self, container, attach_logs=False):
if container.is_running:
return container
else:
if not container.is_running:
log.info("Starting %s" % container.name)
if attach_logs:
container.attach_log_stream()
return self.start_container(container)
def start_container(self, container):
container.start()
container.start()
return container
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
@ -508,7 +502,9 @@ class Service(object):
'image_id': self.image()['Id'],
'links': self.get_link_names(),
'net': self.net.id,
'volumes_from': self.get_volumes_from_names(),
'volumes_from': [
(v.source.name, v.mode) for v in self.volumes_from if isinstance(v.source, Service)
],
}
def get_dependency_names(self):
@ -605,9 +601,6 @@ class Service(object):
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'hostname' not in container_options and self.use_networking:
container_options['hostname'] = self.name
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
@ -683,6 +676,7 @@ class Service(object):
devices = options.get('devices', None)
cgroup_parent = options.get('cgroup_parent', None)
ulimits = build_ulimits(options.get('ulimits', None))
return self.client.create_host_config(
links=self._get_links(link_to_self=one_off),
@ -699,6 +693,7 @@ class Service(object):
cap_drop=cap_drop,
mem_limit=options.get('mem_limit'),
memswap_limit=options.get('memswap_limit'),
ulimits=ulimits,
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
@ -708,7 +703,7 @@ class Service(object):
cgroup_parent=cgroup_parent
)
def build(self, no_cache=False, pull=False):
def build(self, no_cache=False, pull=False, force_rm=False):
log.info('Building %s' % self.name)
path = self.options['build']
@ -722,6 +717,7 @@ class Service(object):
tag=self.image_name,
stream=True,
rm=True,
forcerm=force_rm,
pull=pull,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
@ -899,14 +895,17 @@ def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volumes = [parse_volume_spec(volume) for volume in volumes_option or []]
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
build_volume_binding(volume)
for volume in volumes
if volume.external)
if previous_container:
data_volumes = get_container_data_volumes(previous_container, volumes)
warn_on_masked_volume(volumes, data_volumes, previous_container.service)
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
build_volume_binding(volume) for volume in data_volumes)
return list(volume_bindings.values())
@ -916,13 +915,14 @@ def get_container_data_volumes(container, volumes_option):
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
image_volumes = [
parse_volume_spec(volume)
for volume in
container.image_config['ContainerConfig'].get('Volumes') or {}
]
for volume in set(volumes_option + list(image_volumes)):
volume = parse_volume_spec(volume)
for volume in set(volumes_option + image_volumes):
# No need to preserve host volumes
if volume.external:
continue
@ -934,9 +934,27 @@ def get_container_data_volumes(container, volumes_option):
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
volumes.append(volume)
return dict(volumes)
return volumes
def warn_on_masked_volume(volumes_option, container_volumes, service):
    """Warn when a volume from the Compose file is masked by a data volume
    carried over from the previous container.

    `container_volumes` are the volume specs preserved from the old
    container; any requested volume whose host mapping differs from what
    the old container provides will silently have no effect, so log it.
    """
    # Map internal (container) path -> external (host/volume) path that
    # the previous container will contribute.
    preserved = {
        volume.internal: volume.external
        for volume in container_volumes
    }

    for volume in volumes_option:
        if preserved.get(volume.internal) == volume.external:
            continue
        log.warn((
            "Service \"{service}\" is using volume \"{volume}\" from the "
            "previous container. Host mapping \"{host_path}\" has no effect. "
            "Remove the existing containers (with `docker-compose rm {service}`) "
            "to use the host volume mapping."
        ).format(
            service=service,
            volume=volume.internal,
            host_path=volume.external))
def build_volume_binding(volume_spec):
@ -1058,6 +1076,23 @@ def parse_restart_spec(restart_config):
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
# Ulimits
def build_ulimits(ulimit_config):
    """Convert the `ulimits` service option into docker-py's list format.

    Each entry may be a bare integer (used for both soft and hard limits)
    or a {soft: n, hard: m} mapping. Returns None when the option is
    unset or empty; entries of any other type are silently skipped,
    matching the original behavior.
    """
    if not ulimit_config:
        return None

    result = []
    for limit_name, value in ulimit_config.items():
        if isinstance(value, int):
            # Single integer applies to both the soft and hard limit.
            result.append({'name': limit_name, 'soft': value, 'hard': value})
        elif isinstance(value, dict):
            entry = {'name': limit_name}
            entry.update(value)
            result.append(entry)
    return result
# Extra hosts

View File

@ -95,7 +95,7 @@ def stream_as_text(stream):
"""
for data in stream:
if not isinstance(data, six.text_type):
data = data.decode('utf-8')
data = data.decode('utf-8', 'replace')
yield data
@ -164,7 +164,7 @@ def write_out_msg(stream, lines, msg_index, msg, status="done"):
stream.write("%c[%dA" % (27, diff))
# erase
stream.write("%c[2K\r" % 27)
stream.write("{} {} ... {}\n".format(msg, obj_index, status))
stream.write("{} {} ... {}\r".format(msg, obj_index, status))
# move back down
stream.write("%c[%dB" % (27, diff))
else:

View File

@ -87,7 +87,7 @@ __docker_compose_services_stopped() {
_docker_compose_build() {
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--help --no-cache --pull" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--force-rm --help --no-cache --pull" -- "$cur" ) )
;;
*)
__docker_compose_services_from_build

View File

@ -192,6 +192,7 @@ __docker-compose_subcommand() {
(build)
_arguments \
$opts_help \
'--force-rm[Always remove intermediate containers.]' \
'--no-cache[Do not use cache when building the image]' \
'--pull[Always attempt to pull a newer version of the image.]' \
'*:services:__docker-compose_services_from_build' && ret=0

View File

@ -331,6 +331,18 @@ Override the default labeling scheme for each container.
- label:user:USER
- label:role:ROLE
### ulimits
Override the default ulimits for a container. You can either specify a single
limit as an integer or soft/hard limits as a mapping.
ulimits:
nproc: 65535
nofile:
soft: 20000
hard: 40000
### volumes, volume\_driver
Mount paths as volumes, optionally specifying a path on the host machine

View File

@ -39,7 +39,7 @@ which the release page specifies, in your terminal.
The following is an example command illustrating the format:
curl -L https://github.com/docker/compose/releases/download/VERSION_NUM/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
curl -L https://github.com/docker/compose/releases/download/1.5.1/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
If you have problems installing with `curl`, see
[Alternative Install Options](#alternative-install-options).
@ -54,7 +54,7 @@ which the release page specifies, in your terminal.
7. Test the installation.
$ docker-compose --version
docker-compose version: 1.5.0
docker-compose version: 1.5.1
## Alternative install options
@ -76,7 +76,7 @@ to get started.
Compose can also be run inside a container, from a small bash script wrapper.
To install compose as a container run:
$ curl -L https://github.com/docker/compose/releases/download/1.5.0/run.sh > /usr/local/bin/docker-compose
$ curl -L https://github.com/docker/compose/releases/download/1.5.1/run.sh > /usr/local/bin/docker-compose
$ chmod +x /usr/local/bin/docker-compose
## Master builds

View File

@ -15,6 +15,7 @@ parent = "smn_compose_cli"
Usage: build [options] [SERVICE...]
Options:
--force-rm Always remove intermediate containers.
--no-cache Do not use cache when building the image.
--pull Always attempt to pull a newer version of the image.
```

View File

@ -1,4 +1,4 @@
PyYAML==3.10
PyYAML==3.11
docker-py==1.5.0
dockerpty==0.3.4
docopt==0.6.1

View File

@ -15,7 +15,7 @@
set -e
VERSION="1.5.0"
VERSION="1.5.1"
IMAGE="docker/compose:$VERSION"

View File

View File

@ -2,30 +2,31 @@ from __future__ import absolute_import
import os
import shlex
import sys
import subprocess
from collections import namedtuple
from operator import attrgetter
from six import StringIO
from .. import mock
from .testcases import DockerClientTestCase
from compose.cli.command import get_project
from compose.cli.docker_client import docker_client
from compose.cli.errors import UserError
from compose.cli.main import TopLevelCommand
from compose.project import NoSuchService
from compose.container import Container
from tests.integration.testcases import DockerClientTestCase
ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
BUILD_CACHE_TEXT = 'Using cache'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
class CLITestCase(DockerClientTestCase):
def setUp(self):
super(CLITestCase, self).setUp()
self.old_sys_exit = sys.exit
sys.exit = lambda code=0: None
self.command = TopLevelCommand()
self.command.base_dir = 'tests/fixtures/simple-composefile'
self.base_dir = 'tests/fixtures/simple-composefile'
def tearDown(self):
sys.exit = self.old_sys_exit
self.project.kill()
self.project.remove_stopped()
for container in self.project.containers(stopped=True, one_off=True):
@ -34,129 +35,146 @@ class CLITestCase(DockerClientTestCase):
@property
def project(self):
# Hack: allow project to be overridden. This needs refactoring so that
# the project object is built exactly once, by the command object, and
# accessed by the test case object.
if hasattr(self, '_project'):
return self._project
# Hack: allow project to be overridden
if not hasattr(self, '_project'):
self._project = get_project(self.base_dir)
return self._project
return get_project(self.command.base_dir)
def dispatch(self, options, project_options=None, returncode=0):
project_options = project_options or []
proc = subprocess.Popen(
['docker-compose'] + project_options + options,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.base_dir)
print("Running process: %s" % proc.pid)
stdout, stderr = proc.communicate()
if proc.returncode != returncode:
print(stderr)
assert proc.returncode == returncode
return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))
def test_help(self):
old_base_dir = self.command.base_dir
self.command.base_dir = 'tests/fixtures/no-composefile'
with self.assertRaises(SystemExit) as exc_context:
self.command.dispatch(['help', 'up'], None)
self.assertIn('Usage: up [options] [SERVICE...]', str(exc_context.exception))
old_base_dir = self.base_dir
self.base_dir = 'tests/fixtures/no-composefile'
result = self.dispatch(['help', 'up'], returncode=1)
assert 'Usage: up [options] [SERVICE...]' in result.stderr
# self.project.kill() fails during teardown
# unless there is a composefile.
self.command.base_dir = old_base_dir
self.base_dir = old_base_dir
# TODO: address the "Inappropriate ioctl for device" warnings in test output
def test_ps(self):
self.project.get_service('simple').create_container()
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
self.command.dispatch(['ps'], None)
self.assertIn('simplecomposefile_simple_1', mock_stdout.getvalue())
result = self.dispatch(['ps'])
assert 'simplecomposefile_simple_1' in result.stdout
def test_ps_default_composefile(self):
self.command.base_dir = 'tests/fixtures/multiple-composefiles'
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
self.command.dispatch(['up', '-d'], None)
self.command.dispatch(['ps'], None)
self.base_dir = 'tests/fixtures/multiple-composefiles'
self.dispatch(['up', '-d'])
result = self.dispatch(['ps'])
output = mock_stdout.getvalue()
self.assertIn('multiplecomposefiles_simple_1', output)
self.assertIn('multiplecomposefiles_another_1', output)
self.assertNotIn('multiplecomposefiles_yetanother_1', output)
self.assertIn('multiplecomposefiles_simple_1', result.stdout)
self.assertIn('multiplecomposefiles_another_1', result.stdout)
self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout)
def test_ps_alternate_composefile(self):
config_path = os.path.abspath(
'tests/fixtures/multiple-composefiles/compose2.yml')
self._project = get_project(self.command.base_dir, [config_path])
self._project = get_project(self.base_dir, [config_path])
self.command.base_dir = 'tests/fixtures/multiple-composefiles'
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
self.command.dispatch(['-f', 'compose2.yml', 'up', '-d'], None)
self.command.dispatch(['-f', 'compose2.yml', 'ps'], None)
self.base_dir = 'tests/fixtures/multiple-composefiles'
self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
result = self.dispatch(['-f', 'compose2.yml', 'ps'])
output = mock_stdout.getvalue()
self.assertNotIn('multiplecomposefiles_simple_1', output)
self.assertNotIn('multiplecomposefiles_another_1', output)
self.assertIn('multiplecomposefiles_yetanother_1', output)
self.assertNotIn('multiplecomposefiles_simple_1', result.stdout)
self.assertNotIn('multiplecomposefiles_another_1', result.stdout)
self.assertIn('multiplecomposefiles_yetanother_1', result.stdout)
@mock.patch('compose.service.log')
def test_pull(self, mock_logging):
self.command.dispatch(['pull'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call('Pulling another (busybox:latest)...')
def test_pull(self):
result = self.dispatch(['pull'])
assert sorted(result.stderr.split('\n'))[1:] == [
'Pulling another (busybox:latest)...',
'Pulling simple (busybox:latest)...',
]
@mock.patch('compose.service.log')
def test_pull_with_digest(self, mock_logging):
self.command.dispatch(['-f', 'digest.yml', 'pull'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call(
'Pulling digest (busybox@'
'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d)...')
def test_pull_with_digest(self):
result = self.dispatch(['-f', 'digest.yml', 'pull'])
@mock.patch('compose.service.log')
def test_pull_with_ignore_pull_failures(self, mock_logging):
self.command.dispatch(['-f', 'ignore-pull-failures.yml', 'pull', '--ignore-pull-failures'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call('Pulling another (nonexisting-image:latest)...')
mock_logging.error.assert_any_call('Error: image library/nonexisting-image:latest not found')
assert 'Pulling simple (busybox:latest)...' in result.stderr
assert ('Pulling digest (busybox@'
'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
'04ee8502d)...') in result.stderr
def test_pull_with_ignore_pull_failures(self):
result = self.dispatch([
'-f', 'ignore-pull-failures.yml',
'pull', '--ignore-pull-failures'])
assert 'Pulling simple (busybox:latest)...' in result.stderr
assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
assert 'Error: image library/nonexisting-image:latest not found' in result.stderr
def test_build_plain(self):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'])
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
self.command.dispatch(['build', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
self.assertNotIn(pull_indicator, output)
result = self.dispatch(['build', 'simple'])
assert BUILD_CACHE_TEXT in result.stdout
assert BUILD_PULL_TEXT not in result.stdout
def test_build_no_cache(self):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'])
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
self.command.dispatch(['build', '--no-cache', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
self.assertNotIn(pull_indicator, output)
result = self.dispatch(['build', '--no-cache', 'simple'])
assert BUILD_CACHE_TEXT not in result.stdout
assert BUILD_PULL_TEXT not in result.stdout
def test_build_pull(self):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'], None)
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
self.command.dispatch(['build', '--pull', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
self.assertIn(pull_indicator, output)
result = self.dispatch(['build', '--pull', 'simple'])
assert BUILD_CACHE_TEXT in result.stdout
assert BUILD_PULL_TEXT in result.stdout
def test_build_no_cache_pull(self):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'])
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
with mock.patch('sys.stdout', new_callable=StringIO) as mock_stdout:
self.command.dispatch(['build', '--no-cache', '--pull', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
self.assertIn(pull_indicator, output)
result = self.dispatch(['build', '--no-cache', '--pull', 'simple'])
assert BUILD_CACHE_TEXT not in result.stdout
assert BUILD_PULL_TEXT in result.stdout
def test_build_failed(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', 'simple'], returncode=1)
labels = ["com.docker.compose.test_failing_image=true"]
containers = [
Container.from_ps(self.project.client, c)
for c in self.project.client.containers(
all=True,
filters={"label": labels})
]
assert len(containers) == 1
def test_build_failed_forcerm(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
labels = ["com.docker.compose.test_failing_image=true"]
containers = [
Container.from_ps(self.project.client, c)
for c in self.project.client.containers(
all=True,
filters={"label": labels})
]
assert not containers
def test_up_detached(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
@ -169,28 +187,17 @@ class CLITestCase(DockerClientTestCase):
self.assertFalse(container.get('Config.AttachStdin'))
def test_up_attached(self):
with mock.patch(
'compose.cli.main.attach_to_logs',
autospec=True
) as mock_attach:
self.command.dispatch(['up'], None)
_, args, kwargs = mock_attach.mock_calls[0]
_project, log_printer, _names, _timeout = args
self.base_dir = 'tests/fixtures/echo-services'
result = self.dispatch(['up', '--no-color'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
self.assertEqual(
set(log_printer.containers),
set(self.project.containers())
)
assert 'simple_1 | simple' in result.stdout
assert 'another_1 | another' in result.stdout
def test_up_without_networking(self):
self.require_api_version('1.21')
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d'], None)
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d'], None)
client = docker_client(version='1.21')
networks = client.networks(names=[self.project.name])
@ -207,8 +214,8 @@ class CLITestCase(DockerClientTestCase):
def test_up_with_networking(self):
self.require_api_version('1.21')
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['--x-networking', 'up', '-d'], None)
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['--x-networking', 'up', '-d'], None)
client = docker_client(version='1.21')
services = self.project.get_services()
@ -226,14 +233,13 @@ class CLITestCase(DockerClientTestCase):
containers = service.containers()
self.assertEqual(len(containers), 1)
self.assertIn(containers[0].id, network['Containers'])
self.assertEqual(containers[0].get('Config.Hostname'), service.name)
web_container = self.project.get_service('web').containers()[0]
self.assertFalse(web_container.get('HostConfig.Links'))
def test_up_with_links(self):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', 'web'], None)
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
@ -242,8 +248,8 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(console.containers()), 0)
def test_up_with_no_deps(self):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', '--no-deps', 'web'], None)
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', '--no-deps', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
@ -252,13 +258,13 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(console.containers()), 0)
def test_up_with_force_recreate(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--force-recreate'], None)
self.dispatch(['up', '-d', '--force-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
@ -266,13 +272,13 @@ class CLITestCase(DockerClientTestCase):
self.assertNotEqual(old_ids, new_ids)
def test_up_with_no_recreate(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--no-recreate'], None)
self.dispatch(['up', '-d', '--no-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
@ -280,11 +286,12 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(old_ids, new_ids)
def test_up_with_force_recreate_and_no_recreate(self):
with self.assertRaises(UserError):
self.command.dispatch(['up', '-d', '--force-recreate', '--no-recreate'], None)
self.dispatch(
['up', '-d', '--force-recreate', '--no-recreate'],
returncode=1)
def test_up_with_timeout(self):
self.command.dispatch(['up', '-d', '-t', '1'], None)
self.dispatch(['up', '-d', '-t', '1'], None)
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
@ -296,10 +303,9 @@ class CLITestCase(DockerClientTestCase):
self.assertFalse(config['AttachStdout'])
self.assertFalse(config['AttachStdin'])
@mock.patch('dockerpty.start')
def test_run_service_without_links(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', 'console', '/bin/true'], None)
def test_run_service_without_links(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', 'console', '/bin/true'])
self.assertEqual(len(self.project.containers()), 0)
# Ensure stdin/out was open
@ -309,44 +315,40 @@ class CLITestCase(DockerClientTestCase):
self.assertTrue(config['AttachStdout'])
self.assertTrue(config['AttachStdin'])
@mock.patch('dockerpty.start')
def test_run_service_with_links(self, _):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', 'web', '/bin/true'], None)
def test_run_service_with_links(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
@mock.patch('dockerpty.start')
def test_run_with_no_deps(self, _):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None)
def test_run_with_no_deps(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 0)
@mock.patch('dockerpty.start')
def test_run_does_not_recreate_linked_containers(self, _):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', 'db'], None)
def test_run_does_not_recreate_linked_containers(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'db'])
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 1)
old_ids = [c.id for c in db.containers()]
self.command.dispatch(['run', 'web', '/bin/true'], None)
self.dispatch(['run', 'web', '/bin/true'], None)
self.assertEqual(len(db.containers()), 1)
new_ids = [c.id for c in db.containers()]
self.assertEqual(old_ids, new_ids)
@mock.patch('dockerpty.start')
def test_run_without_command(self, _):
self.command.base_dir = 'tests/fixtures/commands-composefile'
def test_run_without_command(self):
self.base_dir = 'tests/fixtures/commands-composefile'
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
self.command.dispatch(['run', 'implicit'], None)
self.dispatch(['run', 'implicit'])
service = self.project.get_service('implicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
@ -354,7 +356,7 @@ class CLITestCase(DockerClientTestCase):
[u'/bin/sh -c echo "success"'],
)
self.command.dispatch(['run', 'explicit'], None)
self.dispatch(['run', 'explicit'])
service = self.project.get_service('explicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
@ -362,14 +364,10 @@ class CLITestCase(DockerClientTestCase):
[u'/bin/true'],
)
@mock.patch('dockerpty.start')
def test_run_service_with_entrypoint_overridden(self, _):
self.command.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
def test_run_service_with_entrypoint_overridden(self):
self.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
name = 'service'
self.command.dispatch(
['run', '--entrypoint', '/bin/echo', name, 'helloworld'],
None
)
self.dispatch(['run', '--entrypoint', '/bin/echo', name, 'helloworld'])
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(
@ -377,37 +375,34 @@ class CLITestCase(DockerClientTestCase):
[u'/bin/echo', u'helloworld'],
)
@mock.patch('dockerpty.start')
def test_run_service_with_user_overridden(self, _):
self.command.base_dir = 'tests/fixtures/user-composefile'
def test_run_service_with_user_overridden(self):
self.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
args = ['run', '--user={user}'.format(user=user), name]
self.command.dispatch(args, None)
self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(user, container.get('Config.User'))
@mock.patch('dockerpty.start')
def test_run_service_with_user_overridden_short_form(self, _):
self.command.base_dir = 'tests/fixtures/user-composefile'
def test_run_service_with_user_overridden_short_form(self):
self.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
args = ['run', '-u', user, name]
self.command.dispatch(args, None)
self.dispatch(['run', '-u', user, name], returncode=1)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(user, container.get('Config.User'))
@mock.patch('dockerpty.start')
def test_run_service_with_environement_overridden(self, _):
def test_run_service_with_environement_overridden(self):
name = 'service'
self.command.base_dir = 'tests/fixtures/environment-composefile'
self.command.dispatch(
['run', '-e', 'foo=notbar', '-e', 'allo=moto=bobo',
'-e', 'alpha=beta', name],
None
)
self.base_dir = 'tests/fixtures/environment-composefile'
self.dispatch([
'run', '-e', 'foo=notbar',
'-e', 'allo=moto=bobo',
'-e', 'alpha=beta',
name,
'/bin/true',
])
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
# env overridden
@ -419,11 +414,10 @@ class CLITestCase(DockerClientTestCase):
# make sure a value containing '=' doesn't crash
self.assertEqual('moto=bobo', container.environment['allo'])
@mock.patch('dockerpty.start')
def test_run_service_without_map_ports(self, _):
def test_run_service_without_map_ports(self):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', 'simple'], None)
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', 'simple'])
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
@ -437,12 +431,10 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(port_random, None)
self.assertEqual(port_assigned, None)
@mock.patch('dockerpty.start')
def test_run_service_with_map_ports(self, _):
def test_run_service_with_map_ports(self):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', '--service-ports', 'simple'], None)
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', '--service-ports', 'simple'])
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
@ -460,12 +452,10 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(port_range[0], "0.0.0.0:49153")
self.assertEqual(port_range[1], "0.0.0.0:49154")
@mock.patch('dockerpty.start')
def test_run_service_with_explicitly_maped_ports(self, _):
def test_run_service_with_explicitly_maped_ports(self):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'], None)
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'])
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
@ -479,12 +469,10 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(port_short, "0.0.0.0:30000")
self.assertEqual(port_full, "0.0.0.0:30001")
@mock.patch('dockerpty.start')
def test_run_service_with_explicitly_maped_ip_ports(self, _):
def test_run_service_with_explicitly_maped_ip_ports(self):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple'], None)
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
@ -498,22 +486,20 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(port_short, "127.0.0.1:30000")
self.assertEqual(port_full, "127.0.0.1:30001")
@mock.patch('dockerpty.start')
def test_run_with_custom_name(self, _):
self.command.base_dir = 'tests/fixtures/environment-composefile'
def test_run_with_custom_name(self):
self.base_dir = 'tests/fixtures/environment-composefile'
name = 'the-container-name'
self.command.dispatch(['run', '--name', name, 'service'], None)
self.dispatch(['run', '--name', name, 'service', '/bin/true'])
service = self.project.get_service('service')
container, = service.containers(stopped=True, one_off=True)
self.assertEqual(container.name, name)
@mock.patch('dockerpty.start')
def test_run_with_networking(self, _):
def test_run_with_networking(self):
self.require_api_version('1.21')
client = docker_client(version='1.21')
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['--x-networking', 'run', 'simple', 'true'], None)
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['--x-networking', 'run', 'simple', 'true'], None)
service = self.project.get_service('simple')
container, = service.containers(stopped=True, one_off=True)
networks = client.networks(names=[self.project.name])
@ -527,71 +513,70 @@ class CLITestCase(DockerClientTestCase):
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '--force'], None)
self.dispatch(['rm', '--force'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '-f'], None)
self.dispatch(['rm', '-f'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_stop(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['stop', '-t', '1'], None)
self.dispatch(['stop', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_pause_unpause(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertFalse(service.containers()[0].is_paused)
self.command.dispatch(['pause'], None)
self.dispatch(['pause'], None)
self.assertTrue(service.containers()[0].is_paused)
self.command.dispatch(['unpause'], None)
self.dispatch(['unpause'], None)
self.assertFalse(service.containers()[0].is_paused)
def test_logs_invalid_service_name(self):
with self.assertRaises(NoSuchService):
self.command.dispatch(['logs', 'madeupname'], None)
self.dispatch(['logs', 'madeupname'], returncode=1)
def test_kill(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill'], None)
self.dispatch(['kill'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_kill_signal_sigstop(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertEqual(len(service.containers()), 1)
# The container is still running. It has only been paused
self.assertTrue(service.containers()[0].is_running)
def test_kill_stopped_service(self):
self.command.dispatch(['up', '-d'], None)
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGKILL'], None)
self.dispatch(['kill', '-s', 'SIGKILL'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
@ -599,9 +584,9 @@ class CLITestCase(DockerClientTestCase):
def test_restart(self):
service = self.project.get_service('simple')
container = service.create_container()
service.start_container(container)
container.start()
started_at = container.dictionary['State']['StartedAt']
self.command.dispatch(['restart', '-t', '1'], None)
self.dispatch(['restart', '-t', '1'], None)
container.inspect()
self.assertNotEqual(
container.dictionary['State']['FinishedAt'],
@ -615,53 +600,51 @@ class CLITestCase(DockerClientTestCase):
def test_scale(self):
project = self.project
self.command.scale(project, {'SERVICE=NUM': ['simple=1']})
self.dispatch(['scale', 'simple=1'])
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=3', 'another=2']})
self.dispatch(['scale', 'simple=3', 'another=2'])
self.assertEqual(len(project.get_service('simple').containers()), 3)
self.assertEqual(len(project.get_service('another').containers()), 2)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.dispatch(['scale', 'simple=1', 'another=1'])
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.dispatch(['scale', 'simple=1', 'another=1'])
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=0', 'another=0']})
self.dispatch(['scale', 'simple=0', 'another=0'])
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
def test_port(self):
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['up', '-d'], None)
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['up', '-d'], None)
container = self.project.get_service('simple').get_container()
@mock.patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout):
self.command.dispatch(['port', 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
def get_port(number):
result = self.dispatch(['port', 'simple', str(number)])
return result.stdout.rstrip()
self.assertEqual(get_port(3000), container.get_local_port(3000))
self.assertEqual(get_port(3001), "0.0.0.0:49152")
self.assertEqual(get_port(3002), "0.0.0.0:49153")
def test_port_with_scale(self):
self.command.base_dir = 'tests/fixtures/ports-composefile-scale'
self.command.dispatch(['scale', 'simple=2'], None)
self.base_dir = 'tests/fixtures/ports-composefile-scale'
self.dispatch(['scale', 'simple=2'], None)
containers = sorted(
self.project.containers(service_names=['simple']),
key=attrgetter('name'))
@mock.patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout, index=None):
def get_port(number, index=None):
if index is None:
self.command.dispatch(['port', 'simple', str(number)], None)
result = self.dispatch(['port', 'simple', str(number)])
else:
self.command.dispatch(['port', '--index=' + str(index), 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
return result.stdout.rstrip()
self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
@ -670,8 +653,8 @@ class CLITestCase(DockerClientTestCase):
def test_env_file_relative_to_compose_file(self):
config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
self.command.dispatch(['-f', config_path, 'up', '-d'], None)
self._project = get_project(self.command.base_dir, [config_path])
self.dispatch(['-f', config_path, 'up', '-d'], None)
self._project = get_project(self.base_dir, [config_path])
containers = self.project.containers(stopped=True)
self.assertEqual(len(containers), 1)
@ -681,20 +664,18 @@ class CLITestCase(DockerClientTestCase):
def test_home_and_env_var_in_volume_path(self):
os.environ['VOLUME_NAME'] = 'my-volume'
os.environ['HOME'] = '/tmp/home-dir'
expected_host_path = os.path.join(os.environ['HOME'], os.environ['VOLUME_NAME'])
self.command.base_dir = 'tests/fixtures/volume-path-interpolation'
self.command.dispatch(['up', '-d'], None)
self.base_dir = 'tests/fixtures/volume-path-interpolation'
self.dispatch(['up', '-d'], None)
container = self.project.containers(stopped=True)[0]
actual_host_path = container.get('Volumes')['/container-path']
components = actual_host_path.split('/')
self.assertTrue(components[-2:] == ['home-dir', 'my-volume'],
msg="Last two components differ: %s, %s" % (actual_host_path, expected_host_path))
assert components[-2:] == ['home-dir', 'my-volume']
def test_up_with_default_override_file(self):
self.command.base_dir = 'tests/fixtures/override-files'
self.command.dispatch(['up', '-d'], None)
self.base_dir = 'tests/fixtures/override-files'
self.dispatch(['up', '-d'], None)
containers = self.project.containers()
self.assertEqual(len(containers), 2)
@ -704,15 +685,15 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(db.human_readable_command, 'top')
def test_up_with_multiple_files(self):
self.command.base_dir = 'tests/fixtures/override-files'
self.base_dir = 'tests/fixtures/override-files'
config_paths = [
'docker-compose.yml',
'docker-compose.override.yml',
'extra.yml',
]
self._project = get_project(self.command.base_dir, config_paths)
self.command.dispatch(
self._project = get_project(self.base_dir, config_paths)
self.dispatch(
[
'-f', config_paths[0],
'-f', config_paths[1],
@ -731,8 +712,8 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(other.human_readable_command, 'top')
def test_up_with_extends(self):
self.command.base_dir = 'tests/fixtures/extends'
self.command.dispatch(['up', '-d'], None)
self.base_dir = 'tests/fixtures/extends'
self.dispatch(['up', '-d'], None)
self.assertEqual(
set([s.name for s in self.project.services]),

View File

@ -0,0 +1,6 @@
simple:
image: busybox:latest
command: echo simple
another:
image: busybox:latest
command: echo another

View File

@ -5,7 +5,7 @@ bar:
web:
extends:
file: circle-2.yml
service: web
service: other
baz:
image: busybox
quux:

View File

@ -2,7 +2,7 @@ foo:
image: busybox
bar:
image: busybox
web:
other:
extends:
file: circle-1.yml
service: web

View File

@ -0,0 +1,7 @@
FROM busybox:latest
LABEL com.docker.compose.test_image=true
LABEL com.docker.compose.test_failing_image=true
# With the following label the container will be cleaned up automatically
# Must be kept in sync with LABEL_PROJECT from compose/const.py
LABEL com.docker.compose.project=composetest
RUN exit 1

View File

@ -0,0 +1,2 @@
simple:
build: .

View File

@ -7,6 +7,7 @@ from compose.const import LABEL_PROJECT
from compose.container import Container
from compose.project import Project
from compose.service import ConvergenceStrategy
from compose.service import Net
from compose.service import VolumeFromSpec
@ -111,6 +112,7 @@ class ProjectTest(DockerClientTestCase):
network_name = 'network_does_exist'
project = Project(network_name, [], client)
client.create_network(network_name)
self.addCleanup(client.remove_network, network_name)
assert project.get_network()['Name'] == network_name
def test_net_from_service(self):
@ -398,6 +400,20 @@ class ProjectTest(DockerClientTestCase):
self.assertEqual(len(project.get_service('data').containers(stopped=True)), 1)
self.assertEqual(len(project.get_service('console').containers()), 0)
def test_project_up_with_custom_network(self):
self.require_api_version('1.21')
client = docker_client(version='1.21')
network_name = 'composetest-custom'
client.create_network(network_name)
self.addCleanup(client.remove_network, network_name)
web = self.create_service('web', net=Net(network_name))
project = Project('composetest', [web], client, use_networking=True)
project.up()
assert project.get_network() is None
def test_unscale_after_restart(self):
web = self.create_service('web')
project = Project('composetest', [web], self.client)

View File

@ -13,7 +13,7 @@ class ResilienceTest(DockerClientTestCase):
self.project = Project('composetest', [self.db], self.client)
container = self.db.create_container()
self.db.start_container(container)
container.start()
self.host_path = container.get('Volumes')['/var/db']
def test_successful_recreate(self):
@ -31,7 +31,7 @@ class ResilienceTest(DockerClientTestCase):
self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)
def test_start_failure(self):
with mock.patch('compose.service.Service.start_container', crash):
with mock.patch('compose.container.Container.start', crash):
with self.assertRaises(Crash):
self.project.up(strategy=ConvergenceStrategy.always)

View File

@ -14,6 +14,7 @@ from .. import mock
from .testcases import DockerClientTestCase
from .testcases import pull_busybox
from compose import __version__
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_CONTAINER_NUMBER
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
@ -23,6 +24,7 @@ from compose.container import Container
from compose.service import build_extra_hosts
from compose.service import ConfigError
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy
from compose.service import Net
from compose.service import Service
from compose.service import VolumeFromSpec
@ -30,7 +32,8 @@ from compose.service import VolumeFromSpec
def create_and_start_container(service, **override_options):
container = service.create_container(**override_options)
return service.start_container(container)
container.start()
return container
class ServiceTest(DockerClientTestCase):
@ -115,19 +118,19 @@ class ServiceTest(DockerClientTestCase):
def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=['/var/db'])
container = service.create_container()
service.start_container(container)
container.start()
self.assertIn('/var/db', container.get('Volumes'))
def test_create_container_with_volume_driver(self):
service = self.create_service('db', volume_driver='foodriver')
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual('foodriver', container.get('Config.VolumeDriver'))
def test_create_container_with_cpu_shares(self):
service = self.create_service('db', cpu_shares=73)
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual(container.get('HostConfig.CpuShares'), 73)
def test_build_extra_hosts(self):
@ -165,7 +168,7 @@ class ServiceTest(DockerClientTestCase):
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
def test_create_container_with_extra_hosts_dicts(self):
@ -173,33 +176,33 @@ class ServiceTest(DockerClientTestCase):
extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
def test_create_container_with_cpu_set(self):
service = self.create_service('db', cpuset='0')
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
def test_create_container_with_read_only_root_fs(self):
read_only = True
service = self.create_service('db', read_only=read_only)
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual(container.get('HostConfig.ReadonlyRootfs'), read_only, container.get('HostConfig'))
def test_create_container_with_security_opt(self):
security_opt = ['label:disable']
service = self.create_service('db', security_opt=security_opt)
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
def test_create_container_with_mac_address(self):
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
container = service.create_container()
service.start_container(container)
container.start()
self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
def test_create_container_with_specified_volume(self):
@ -208,7 +211,7 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', volumes=['%s:%s' % (host_path, container_path)])
container = service.create_container()
service.start_container(container)
container.start()
volumes = container.inspect()['Volumes']
self.assertIn(container_path, volumes)
@ -281,7 +284,7 @@ class ServiceTest(DockerClientTestCase):
]
)
host_container = host_service.create_container()
host_service.start_container(host_container)
host_container.start()
self.assertIn(volume_container_1.id + ':rw',
host_container.get('HostConfig.VolumesFrom'))
self.assertIn(volume_container_2.id + ':rw',
@ -300,7 +303,7 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
self.assertIn('FOO=1', old_container.get('Config.Env'))
self.assertEqual(old_container.name, 'composetest_db_1')
service.start_container(old_container)
old_container.start()
old_container.inspect() # reload volume data
volume_path = old_container.get('Volumes')['/etc']
@ -366,6 +369,33 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(list(new_container.get('Volumes')), ['/data'])
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
def test_execute_convergence_plan_when_image_volume_masks_config(self):
service = Service(
project='composetest',
name='db',
client=self.client,
build='tests/fixtures/dockerfile-with-volume',
)
old_container = create_and_start_container(service)
self.assertEqual(list(old_container.get('Volumes').keys()), ['/data'])
volume_path = old_container.get('Volumes')['/data']
service.options['volumes'] = ['/tmp:/data']
with mock.patch('compose.service.log') as mock_log:
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
mock_log.warn.assert_called_once_with(mock.ANY)
_, args, kwargs = mock_log.warn.mock_calls[0]
self.assertIn(
"Service \"db\" is using volume \"/data\" from the previous container",
args[0])
self.assertEqual(list(new_container.get('Volumes')), ['/data'])
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
def test_start_container_passes_through_options(self):
db = self.create_service('db')
create_and_start_container(db, environment={'FOO': 'BAR'})
@ -812,7 +842,13 @@ class ServiceTest(DockerClientTestCase):
environment=['ONE=1', 'TWO=2', 'THREE=3'],
env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
env = create_and_start_container(service).environment
for k, v in {'ONE': '1', 'TWO': '2', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}.items():
for k, v in {
'ONE': '1',
'TWO': '2',
'THREE': '3',
'FOO': 'baz',
'DOO': 'dah'
}.items():
self.assertEqual(env[k], v)
@mock.patch.dict(os.environ)
@ -820,9 +856,22 @@ class ServiceTest(DockerClientTestCase):
os.environ['FILE_DEF'] = 'E1'
os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3'
service = self.create_service('web', environment={'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': None, 'NO_DEF': None})
service = self.create_service(
'web',
environment={
'FILE_DEF': 'F1',
'FILE_DEF_EMPTY': '',
'ENV_DEF': None,
'NO_DEF': None
}
)
env = create_and_start_container(service).environment
for k, v in {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}.items():
for k, v in {
'FILE_DEF': 'F1',
'FILE_DEF_EMPTY': '',
'ENV_DEF': 'E3',
'NO_DEF': ''
}.items():
self.assertEqual(env[k], v)
def test_with_high_enough_api_version_we_get_default_network_mode(self):
@ -929,3 +978,38 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
def converge(service,
strategy=ConvergenceStrategy.changed,
do_build=True):
"""Create a convergence plan from a strategy and execute the plan."""
plan = service.convergence_plan(strategy)
return service.execute_convergence_plan(plan, do_build=do_build, timeout=1)
class ConfigHashTest(DockerClientTestCase):
def test_no_config_hash_when_one_off(self):
web = self.create_service('web')
container = web.create_container(one_off=True)
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_no_config_hash_when_overriding_options(self):
web = self.create_service('web')
container = web.create_container(environment={'FOO': '1'})
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_config_hash_with_custom_labels(self):
web = self.create_service('web', labels={'foo': '1'})
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
self.assertIn('foo', container.labels)
def test_config_hash_sticks_around(self):
web = self.create_service('web', command=["top"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
web = self.create_service('web', command=["top", "-d", "1"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)

View File

@ -4,13 +4,10 @@ by `docker-compose up`.
"""
from __future__ import unicode_literals
import os
import shutil
import tempfile
import py
from .testcases import DockerClientTestCase
from compose.config import config
from compose.const import LABEL_CONFIG_HASH
from compose.project import Project
from compose.service import ConvergenceStrategy
@ -179,13 +176,18 @@ class ProjectWithDependenciesTest(ProjectTestCase):
containers = self.run_up(next_cfg)
self.assertEqual(len(containers), 2)
def test_service_recreated_when_dependency_created(self):
containers = self.run_up(self.cfg, service_names=['web'], start_deps=False)
self.assertEqual(len(containers), 1)
def converge(service,
strategy=ConvergenceStrategy.changed,
do_build=True):
"""Create a convergence plan from a strategy and execute the plan."""
plan = service.convergence_plan(strategy)
return service.execute_convergence_plan(plan, do_build=do_build, timeout=1)
containers = self.run_up(self.cfg)
self.assertEqual(len(containers), 3)
web, = [c for c in containers if c.service == 'web']
nginx, = [c for c in containers if c.service == 'nginx']
self.assertEqual(web.links(), ['composetest_db_1', 'db', 'db_1'])
self.assertEqual(nginx.links(), ['composetest_web_1', 'web', 'web_1'])
class ServiceStateTest(DockerClientTestCase):
@ -241,67 +243,49 @@ class ServiceStateTest(DockerClientTestCase):
image_id = self.client.images(name='busybox')[0]['Id']
self.client.tag(image_id, repository=repo, tag=tag)
self.addCleanup(self.client.remove_image, image)
try:
web = self.create_service('web', image=image)
container = web.create_container()
web = self.create_service('web', image=image)
container = web.create_container()
# update the image
c = self.client.create_container(image, ['touch', '/hello.txt'])
self.client.commit(c, repository=repo, tag=tag)
self.client.remove_container(c)
# update the image
c = self.client.create_container(image, ['touch', '/hello.txt'])
self.client.commit(c, repository=repo, tag=tag)
self.client.remove_container(c)
web = self.create_service('web', image=image)
self.assertEqual(('recreate', [container]), web.convergence_plan())
finally:
self.client.remove_image(image)
web = self.create_service('web', image=image)
self.assertEqual(('recreate', [container]), web.convergence_plan())
def test_trigger_recreate_with_build(self):
context = tempfile.mkdtemp()
context = py.test.ensuretemp('test_trigger_recreate_with_build')
self.addCleanup(context.remove)
base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n"
dockerfile = context.join('Dockerfile')
dockerfile.write(base_image)
try:
dockerfile = os.path.join(context, 'Dockerfile')
web = self.create_service('web', build=str(context))
container = web.create_container()
with open(dockerfile, 'w') as f:
f.write(base_image)
dockerfile.write(base_image + 'CMD echo hello world\n')
web.build()
web = self.create_service('web', build=context)
container = web.create_container()
web = self.create_service('web', build=str(context))
self.assertEqual(('recreate', [container]), web.convergence_plan())
with open(dockerfile, 'w') as f:
f.write(base_image + 'CMD echo hello world\n')
web.build()
def test_image_changed_to_build(self):
context = py.test.ensuretemp('test_image_changed_to_build')
self.addCleanup(context.remove)
context.join('Dockerfile').write("""
FROM busybox
LABEL com.docker.compose.test_image=true
""")
web = self.create_service('web', build=context)
self.assertEqual(('recreate', [container]), web.convergence_plan())
finally:
shutil.rmtree(context)
web = self.create_service('web', image='busybox')
container = web.create_container()
class ConfigHashTest(DockerClientTestCase):
def test_no_config_hash_when_one_off(self):
web = self.create_service('web')
container = web.create_container(one_off=True)
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_no_config_hash_when_overriding_options(self):
web = self.create_service('web')
container = web.create_container(environment={'FOO': '1'})
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_config_hash_with_custom_labels(self):
web = self.create_service('web', labels={'foo': '1'})
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
self.assertIn('foo', container.labels)
def test_config_hash_sticks_around(self):
web = self.create_service('web', command=["top"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
web = self.create_service('web', command=["top", "-d", "1"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
web = self.create_service('web', build=str(context))
plan = web.convergence_plan()
self.assertEqual(('recreate', [container]), plan)
containers = web.execute_convergence_plan(plan)
self.assertEqual(len(containers), 1)

View File

@ -7,7 +7,9 @@ from pytest import skip
from .. import unittest
from compose.cli.docker_client import docker_client
from compose.config.config import ServiceLoader
from compose.config.config import process_service
from compose.config.config import resolve_environment
from compose.config.config import ServiceConfig
from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output
from compose.service import Service
@ -42,34 +44,13 @@ class DockerClientTestCase(unittest.TestCase):
if 'command' not in kwargs:
kwargs['command'] = ["top"]
links = kwargs.get('links', None)
volumes_from = kwargs.get('volumes_from', None)
net = kwargs.get('net', None)
workaround_options = ['links', 'volumes_from', 'net']
for key in workaround_options:
try:
del kwargs[key]
except KeyError:
pass
options = ServiceLoader(working_dir='.', filename=None, service_name=name, service_dict=kwargs).make_service_dict()
service_config = ServiceConfig('.', None, name, kwargs)
options = process_service(service_config)
options['environment'] = resolve_environment('.', kwargs)
labels = options.setdefault('labels', {})
labels['com.docker.compose.test-name'] = self.id()
if links:
options['links'] = links
if volumes_from:
options['volumes_from'] = volumes_from
if net:
options['net'] = net
return Service(
project='composetest',
client=self.client,
**options
)
return Service(name, client=self.client, project='composetest', **options)
def check_build(self, *args, **kwargs):
kwargs.setdefault('rm', True)

View File

@ -1,13 +1,13 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import mock
import pytest
import six
from compose.cli.log_printer import LogPrinter
from compose.cli.log_printer import wait_on_exit
from compose.container import Container
from tests import unittest
from tests import mock
def build_mock_container(reader):
@ -22,40 +22,52 @@ def build_mock_container(reader):
)
class LogPrinterTest(unittest.TestCase):
def get_default_output(self, monochrome=False):
def reader(*args, **kwargs):
yield b"hello\nworld"
container = build_mock_container(reader)
output = run_log_printer([container], monochrome=monochrome)
return output
@pytest.fixture
def output_stream():
output = six.StringIO()
output.flush = mock.Mock()
return output
def test_single_container(self):
output = self.get_default_output()
self.assertIn('hello', output)
self.assertIn('world', output)
@pytest.fixture
def mock_container():
def reader(*args, **kwargs):
yield b"hello\nworld"
return build_mock_container(reader)
def test_monochrome(self):
output = self.get_default_output(monochrome=True)
self.assertNotIn('\033[', output)
def test_polychrome(self):
output = self.get_default_output()
self.assertIn('\033[', output)
class TestLogPrinter(object):
def test_unicode(self):
def test_single_container(self, output_stream, mock_container):
LogPrinter([mock_container], output=output_stream).run()
output = output_stream.getvalue()
assert 'hello' in output
assert 'world' in output
# Call count is 2 lines + "container exited line"
assert output_stream.flush.call_count == 3
def test_monochrome(self, output_stream, mock_container):
LogPrinter([mock_container], output=output_stream, monochrome=True).run()
assert '\033[' not in output_stream.getvalue()
def test_polychrome(self, output_stream, mock_container):
LogPrinter([mock_container], output=output_stream).run()
assert '\033[' in output_stream.getvalue()
def test_unicode(self, output_stream):
glyph = u'\u2022'
def reader(*args, **kwargs):
yield glyph.encode('utf-8') + b'\n'
container = build_mock_container(reader)
output = run_log_printer([container])
LogPrinter([container], output=output_stream).run()
output = output_stream.getvalue()
if six.PY2:
output = output.decode('utf-8')
self.assertIn(glyph, output)
assert glyph in output
def test_wait_on_exit(self):
exit_status = 3
@ -65,24 +77,12 @@ class LogPrinterTest(unittest.TestCase):
wait=mock.Mock(return_value=exit_status))
expected = '{} exited with code {}\n'.format(mock_container.name, exit_status)
self.assertEqual(expected, wait_on_exit(mock_container))
assert expected == wait_on_exit(mock_container)
def test_generator_with_no_logs(self):
mock_container = mock.Mock(
spec=Container,
has_api_logs=False,
log_driver='none',
name_without_project='web_1',
wait=mock.Mock(return_value=0))
def test_generator_with_no_logs(self, mock_container, output_stream):
mock_container.has_api_logs = False
mock_container.log_driver = 'none'
LogPrinter([mock_container], output=output_stream).run()
output = run_log_printer([mock_container])
self.assertIn(
"WARNING: no logs are available with the 'none' log driver\n",
output
)
def run_log_printer(containers, monochrome=False):
output = six.StringIO()
LogPrinter(containers, output=output, monochrome=monochrome).run()
return output.getvalue()
output = output_stream.getvalue()
assert "WARNING: no logs are available with the 'none' log driver\n" in output

View File

@ -18,13 +18,14 @@ from tests import unittest
def make_service_dict(name, service_dict, working_dir, filename=None):
"""
Test helper function to construct a ServiceLoader
Test helper function to construct a ServiceExtendsResolver
"""
return config.ServiceLoader(
resolver = config.ServiceExtendsResolver(config.ServiceConfig(
working_dir=working_dir,
filename=filename,
service_name=name,
service_dict=service_dict).make_service_dict()
name=name,
config=service_dict))
return config.process_service(resolver.run())
def service_sort(services):
@ -76,18 +77,38 @@ class ConfigTest(unittest.TestCase):
)
def test_config_invalid_service_names(self):
with self.assertRaises(ConfigurationError):
for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
config.load(
build_config_details(
{invalid_name: {'image': 'busybox'}},
'working_dir',
'filename.yml'
)
)
for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(
{invalid_name: {'image': 'busybox'}},
'working_dir',
'filename.yml'))
assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly()
def test_load_with_invalid_field_name(self):
config_details = build_config_details(
{'web': {'image': 'busybox', 'name': 'bogus'}},
'working_dir',
'filename.yml')
with pytest.raises(ConfigurationError) as exc:
config.load(config_details)
error_msg = "Unsupported config option for 'web' service: 'name'"
assert error_msg in exc.exconly()
assert "Validation failed in file 'filename.yml'" in exc.exconly()
def test_load_invalid_service_definition(self):
config_details = build_config_details(
{'web': 'wrong'},
'working_dir',
'filename.yml')
with pytest.raises(ConfigurationError) as exc:
config.load(config_details)
error_msg = "service 'web' doesn't have any configuration options"
assert error_msg in exc.exconly()
def test_config_integer_service_name_raise_validation_error(self):
expected_error_msg = "Service name: 1 needs to be a string, eg '1'"
expected_error_msg = ("In file 'filename.yml' service name: 1 needs to "
"be a string, eg '1'")
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
config.load(
build_config_details(
@ -137,25 +158,26 @@ class ConfigTest(unittest.TestCase):
def test_load_with_multiple_files_and_empty_override(self):
base_file = config.ConfigFile(
'base.yaml',
'base.yml',
{'web': {'image': 'example/web'}})
override_file = config.ConfigFile('override.yaml', None)
override_file = config.ConfigFile('override.yml', None)
details = config.ConfigDetails('.', [base_file, override_file])
with pytest.raises(ConfigurationError) as exc:
config.load(details)
assert 'Top level object needs to be a dictionary' in exc.exconly()
error_msg = "Top level object in 'override.yml' needs to be an object"
assert error_msg in exc.exconly()
def test_load_with_multiple_files_and_empty_base(self):
base_file = config.ConfigFile('base.yaml', None)
base_file = config.ConfigFile('base.yml', None)
override_file = config.ConfigFile(
'override.yaml',
'override.yml',
{'web': {'image': 'example/web'}})
details = config.ConfigDetails('.', [base_file, override_file])
with pytest.raises(ConfigurationError) as exc:
config.load(details)
assert 'Top level object needs to be a dictionary' in exc.exconly()
assert "Top level object in 'base.yml' needs to be an object" in exc.exconly()
def test_load_with_multiple_files_and_extends_in_override_file(self):
base_file = config.ConfigFile(
@ -177,6 +199,7 @@ class ConfigTest(unittest.TestCase):
details = config.ConfigDetails('.', [base_file, override_file])
tmpdir = py.test.ensuretemp('config_test')
self.addCleanup(tmpdir.remove)
tmpdir.join('common.yml').write("""
base:
labels: ['label=one']
@ -194,15 +217,28 @@ class ConfigTest(unittest.TestCase):
]
self.assertEqual(service_sort(service_dicts), service_sort(expected))
def test_load_with_multiple_files_and_invalid_override(self):
base_file = config.ConfigFile(
'base.yaml',
{'web': {'image': 'example/web'}})
override_file = config.ConfigFile(
'override.yaml',
{'bogus': 'thing'})
details = config.ConfigDetails('.', [base_file, override_file])
with pytest.raises(ConfigurationError) as exc:
config.load(details)
assert "service 'bogus' doesn't have any configuration" in exc.exconly()
assert "In file 'override.yaml'" in exc.exconly()
def test_config_valid_service_names(self):
for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
config.load(
services = config.load(
build_config_details(
{valid_name: {'image': 'busybox'}},
'tests/fixtures/extends',
'common.yml'
)
)
'common.yml'))
assert services[0]['name'] == valid_name
def test_config_invalid_ports_format_validation(self):
expected_error_msg = "Service 'web' configuration key 'ports' contains an invalid type"
@ -267,7 +303,8 @@ class ConfigTest(unittest.TestCase):
)
def test_invalid_config_not_a_dictionary(self):
expected_error_msg = "Top level object needs to be a dictionary."
expected_error_msg = ("Top level object in 'filename.yml' needs to be "
"an object.")
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
config.load(
build_config_details(
@ -348,6 +385,60 @@ class ConfigTest(unittest.TestCase):
)
)
def test_config_ulimits_invalid_keys_validation_error(self):
expected = ("Service 'web' configuration key 'ulimits' 'nofile' contains "
"unsupported option: 'not_soft_or_hard'")
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(
{
'web': {
'image': 'busybox',
'ulimits': {
'nofile': {
"not_soft_or_hard": 100,
"soft": 10000,
"hard": 20000,
}
}
}
},
'working_dir',
'filename.yml'))
assert expected in exc.exconly()
def test_config_ulimits_required_keys_validation_error(self):
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(
{
'web': {
'image': 'busybox',
'ulimits': {'nofile': {"soft": 10000}}
}
},
'working_dir',
'filename.yml'))
assert "Service 'web' configuration key 'ulimits' 'nofile'" in exc.exconly()
assert "'hard' is a required property" in exc.exconly()
def test_config_ulimits_soft_greater_than_hard_error(self):
expected = "cannot contain a 'soft' value higher than 'hard' value"
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(
{
'web': {
'image': 'busybox',
'ulimits': {
'nofile': {"soft": 10000, "hard": 1000}
}
}
},
'working_dir',
'filename.yml'))
assert expected in exc.exconly()
def test_valid_config_which_allows_two_type_definitions(self):
expose_values = [["8000"], [8000]]
for expose in expose_values:
@ -395,23 +486,22 @@ class ConfigTest(unittest.TestCase):
self.assertTrue(mock_logging.warn.called)
self.assertTrue(expected_warning_msg in mock_logging.warn.call_args[0][0])
def test_config_invalid_environment_dict_key_raises_validation_error(self):
expected_error_msg = "Service 'web' configuration key 'environment' contains unsupported option: '---'"
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
config.load(
build_config_details(
{'web': {
'image': 'busybox',
'environment': {'---': 'nope'}
}},
'working_dir',
'filename.yml'
)
def test_config_valid_environment_dict_key_contains_dashes(self):
services = config.load(
build_config_details(
{'web': {
'image': 'busybox',
'environment': {'SPRING_JPA_HIBERNATE_DDL-AUTO': 'none'}
}},
'working_dir',
'filename.yml'
)
)
self.assertEqual(services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'], 'none')
def test_load_yaml_with_yaml_error(self):
tmpdir = py.test.ensuretemp('invalid_yaml_test')
self.addCleanup(tmpdir.remove)
invalid_yaml_file = tmpdir.join('docker-compose.yml')
invalid_yaml_file.write("""
web:
@ -573,6 +663,11 @@ class VolumeConfigTest(unittest.TestCase):
}, working_dir='.')
self.assertEqual(d['volumes'], ['~:/data'])
def test_volume_path_with_non_ascii_directory(self):
volume = u'/Füü/data:/data'
container_path = config.resolve_volume_path(".", volume)
self.assertEqual(container_path, volume)
class MergePathMappingTest(object):
def config_name(self):
@ -768,7 +863,7 @@ class MemoryOptionsTest(unittest.TestCase):
a mem_limit
"""
expected_error_msg = (
"Invalid 'memswap_limit' configuration for 'foo' service: when "
"Service 'foo' configuration key 'memswap_limit' is invalid: when "
"defining 'memswap_limit' you must set 'mem_limit' as well"
)
with self.assertRaisesRegexp(ConfigurationError, expected_error_msg):
@ -999,18 +1094,19 @@ class ExtendsTest(unittest.TestCase):
]))
def test_circular(self):
try:
with pytest.raises(config.CircularReference) as exc:
load_from_filename('tests/fixtures/extends/circle-1.yml')
raise Exception("Expected config.CircularReference to be raised")
except config.CircularReference as e:
self.assertEqual(
[(os.path.basename(filename), service_name) for (filename, service_name) in e.trail],
[
('circle-1.yml', 'web'),
('circle-2.yml', 'web'),
('circle-1.yml', 'web'),
],
)
path = [
(os.path.basename(filename), service_name)
for (filename, service_name) in exc.value.trail
]
expected = [
('circle-1.yml', 'web'),
('circle-2.yml', 'other'),
('circle-1.yml', 'web'),
]
self.assertEqual(path, expected)
def test_extends_validation_empty_dictionary(self):
with self.assertRaisesRegexp(ConfigurationError, 'service'):

View File

@ -34,3 +34,34 @@ class ProgressStreamTestCase(unittest.TestCase):
]
events = progress_stream.stream_output(output, StringIO())
self.assertEqual(len(events), 1)
def test_stream_output_progress_event_tty(self):
events = [
b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
]
class TTYStringIO(StringIO):
def isatty(self):
return True
output = TTYStringIO()
events = progress_stream.stream_output(events, output)
self.assertTrue(len(output.getvalue()) > 0)
def test_stream_output_progress_event_no_tty(self):
events = [
b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
]
output = StringIO()
events = progress_stream.stream_output(events, output)
self.assertEqual(len(output.getvalue()), 0)
def test_stream_output_no_progress_event_no_tty(self):
events = [
b'{"status": "Pulling from library/xy", "id": "latest"}'
]
output = StringIO()
events = progress_stream.stream_output(events, output)
self.assertTrue(len(output.getvalue()) > 0)

View File

@ -7,6 +7,8 @@ from .. import unittest
from compose.const import LABEL_SERVICE
from compose.container import Container
from compose.project import Project
from compose.service import ContainerNet
from compose.service import Net
from compose.service import Service
@ -263,6 +265,32 @@ class ProjectTest(unittest.TestCase):
service = project.get_service('test')
self.assertEqual(service.net.mode, 'container:' + container_name)
def test_uses_default_network_true(self):
web = Service('web', project='test', image="alpine", net=Net('test'))
db = Service('web', project='test', image="alpine", net=Net('other'))
project = Project('test', [web, db], None)
assert project.uses_default_network()
def test_uses_default_network_custom_name(self):
web = Service('web', project='test', image="alpine", net=Net('other'))
project = Project('test', [web], None)
assert not project.uses_default_network()
def test_uses_default_network_host(self):
web = Service('web', project='test', image="alpine", net=Net('host'))
project = Project('test', [web], None)
assert not project.uses_default_network()
def test_uses_default_network_container(self):
container = mock.Mock(id='test')
web = Service(
'web',
project='test',
image="alpine",
net=ContainerNet(container))
project = Project('test', [web], None)
assert not project.uses_default_network()
def test_container_without_name(self):
self.mock_client.containers.return_value = [
{'Image': 'busybox:latest', 'Id': '1', 'Name': '1'},

View File

@ -12,6 +12,7 @@ from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.container import Container
from compose.service import build_ulimits
from compose.service import build_volume_binding
from compose.service import ConfigError
from compose.service import ContainerNet
@ -213,16 +214,6 @@ class ServiceTest(unittest.TestCase):
opts = service._get_container_create_options({'image': 'foo'}, 1)
self.assertIsNone(opts.get('hostname'))
def test_hostname_defaults_to_service_name_when_using_networking(self):
service = Service(
'foo',
image='foo',
use_networking=True,
client=self.mock_client,
)
opts = service._get_container_create_options({'image': 'foo'}, 1)
self.assertEqual(opts['hostname'], 'foo')
def test_get_container_create_options_with_name_option(self):
service = Service(
'foo',
@ -349,44 +340,38 @@ class ServiceTest(unittest.TestCase):
self.assertEqual(parse_repository_tag("user/repo@sha256:digest"), ("user/repo", "sha256:digest", "@"))
self.assertEqual(parse_repository_tag("url:5000/repo@sha256:digest"), ("url:5000/repo", "sha256:digest", "@"))
@mock.patch('compose.service.Container', autospec=True)
def test_create_container_latest_is_used_when_no_tag_specified(self, mock_container):
service = Service('foo', client=self.mock_client, image='someimage')
images = []
def pull(repo, tag=None, **kwargs):
self.assertEqual('someimage', repo)
self.assertEqual('latest', tag)
images.append({'Id': 'abc123'})
return []
service.image = lambda *args, **kwargs: mock_get_image(images)
self.mock_client.pull = pull
service.create_container()
self.assertEqual(1, len(images))
def test_create_container_with_build(self):
service = Service('foo', client=self.mock_client, build='.')
images = []
service.image = lambda *args, **kwargs: mock_get_image(images)
service.build = lambda: images.append({'Id': 'abc123'})
self.mock_client.inspect_image.side_effect = [
NoSuchImageError,
{'Id': 'abc123'},
]
self.mock_client.build.return_value = [
'{"stream": "Successfully built abcd"}',
]
service.create_container(do_build=True)
self.assertEqual(1, len(images))
self.mock_client.build.assert_called_once_with(
tag='default_foo',
dockerfile=None,
stream=True,
path='.',
pull=False,
forcerm=False,
nocache=False,
rm=True,
)
def test_create_container_no_build(self):
service = Service('foo', client=self.mock_client, build='.')
service.image = lambda: {'Id': 'abc123'}
self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
service.create_container(do_build=False)
self.assertFalse(self.mock_client.build.called)
def test_create_container_no_build_but_needs_build(self):
service = Service('foo', client=self.mock_client, build='.')
service.image = lambda *args, **kwargs: mock_get_image([])
self.mock_client.inspect_image.side_effect = NoSuchImageError
with self.assertRaises(NeedsBuildError):
service.create_container(do_build=False)
@ -417,7 +402,7 @@ class ServiceTest(unittest.TestCase):
'options': {'image': 'example.com/foo'},
'links': [('one', 'one')],
'net': 'other',
'volumes_from': ['two'],
'volumes_from': [('two', 'rw')],
}
self.assertEqual(config_dict, expected)
@ -451,6 +436,47 @@ class ServiceTest(unittest.TestCase):
self.assertEqual(service._get_links(link_to_self=True), [])
def sort_by_name(dictionary_list):
    """Return the given dicts as a new list ordered by their 'name' value."""
    return sorted(dictionary_list, key=lambda entry: entry['name'])
class BuildUlimitsTestCase(unittest.TestCase):
    """Tests for build_ulimits, which normalises the Compose-file 'ulimits'
    mapping into the list-of-dicts form expected by the docker client."""

    def test_build_ulimits_with_dict(self):
        # Fully-specified soft/hard pairs pass through with a 'name' added.
        spec = {
            'nofile': {'soft': 10000, 'hard': 20000},
            'nproc': {'soft': 65535, 'hard': 65535}
        }
        expected = [
            {'name': 'nofile', 'soft': 10000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535}
        ]
        assert sort_by_name(build_ulimits(spec)) == sort_by_name(expected)

    def test_build_ulimits_with_ints(self):
        # A bare integer sets both the soft and the hard limit.
        expected = [
            {'name': 'nofile', 'soft': 20000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535}
        ]
        result = build_ulimits({'nofile': 20000, 'nproc': 65535})
        assert sort_by_name(result) == sort_by_name(expected)

    def test_build_ulimits_with_integers_and_dicts(self):
        # Integer and dict forms may be mixed in one mapping.
        spec = {
            'nproc': 65535,
            'nofile': {'soft': 10000, 'hard': 20000}
        }
        expected = [
            {'name': 'nofile', 'soft': 10000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535}
        ]
        assert sort_by_name(build_ulimits(spec)) == sort_by_name(expected)
class NetTestCase(unittest.TestCase):
def test_net(self):
@ -494,13 +520,6 @@ class NetTestCase(unittest.TestCase):
self.assertEqual(net.service_name, service_name)
def mock_get_image(images):
if images:
return images[0]
else:
raise NoSuchImageError()
class ServiceVolumesTest(unittest.TestCase):
def setUp(self):
@ -545,11 +564,11 @@ class ServiceVolumesTest(unittest.TestCase):
self.assertEqual(binding, ('/inside', '/outside:/inside:rw'))
def test_get_container_data_volumes(self):
options = [
options = [parse_volume_spec(v) for v in [
'/host/volume:/host/volume:ro',
'/new/volume',
'/existing/volume',
]
]]
self.mock_client.inspect_image.return_value = {
'ContainerConfig': {
@ -568,13 +587,13 @@ class ServiceVolumesTest(unittest.TestCase):
},
}, has_been_inspected=True)
expected = {
'/existing/volume': '/var/lib/docker/aaaaaaaa:/existing/volume:rw',
'/mnt/image/data': '/var/lib/docker/cccccccc:/mnt/image/data:rw',
}
expected = [
parse_volume_spec('/var/lib/docker/aaaaaaaa:/existing/volume:rw'),
parse_volume_spec('/var/lib/docker/cccccccc:/mnt/image/data:rw'),
]
binds = get_container_data_volumes(container, options)
self.assertEqual(binds, expected)
volumes = get_container_data_volumes(container, options)
self.assertEqual(sorted(volumes), sorted(expected))
def test_merge_volume_bindings(self):
options = [

View File

@ -1,3 +1,6 @@
# encoding: utf-8
from __future__ import unicode_literals
from .. import unittest
from compose import utils
@ -14,3 +17,16 @@ class JsonSplitterTestCase(unittest.TestCase):
utils.json_splitter(data),
({'foo': 'bar'}, '{"next": "obj"}')
)
class StreamAsTextTestCase(unittest.TestCase):
    """Tests for utils.stream_as_text, which decodes a byte stream to text."""

    def test_stream_with_non_utf_unicode_character(self):
        # Bytes that are not valid utf-8 must be decoded with replacement
        # rather than raising, so log streaming does not crash on them.
        stream = [b'\xed\xf3\xf3']
        output, = utils.stream_as_text(stream)
        # NOTE(review): this literal looks like three U+FFFD replacement
        # characters mangled by the diff renderer — confirm against the repo.
        assert output == '<EFBFBD><EFBFBD><EFBFBD>'

    def test_stream_with_utf_character(self):
        # Valid utf-8 input round-trips unchanged.
        stream = ['ěĝ'.encode('utf-8')]
        output, = utils.stream_as_text(stream)
        assert output == 'ěĝ'

View File

@ -43,4 +43,6 @@ directory = coverage-html
[flake8]
# Allow really long lines for now
max-line-length = 140
# Set this high for now
max-complexity = 20
exclude = compose/packages