Mirror of https://github.com/docker/compose.git (synced 2025-04-08 17:05:13 +02:00)

Merge pull request #7294 from alexrecuenco/remove-python2-1.26goal

Clean up python2 style code

Commit b01601a53c
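The changes below repeat a small set of Python 3 modernization patterns: classes no longer inherit explicitly from object, super() takes no arguments, str.format fields drop their explicit indices, and dict()/set() calls become comprehensions. As an illustrative sketch (the Greeter class is invented for this note, not taken from the diff):

class Greeter:                                  # was: class Greeter(object):
    def __init__(self, name):
        super().__init__()                      # was: super(Greeter, self).__init__()
        self.name = name

    def greet(self):
        return 'Hello, {}!'.format(self.name)   # was: 'Hello, {0}!'.format(self.name)

print(Greeter('compose').greet())               # Hello, compose!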
@@ -20,3 +20,9 @@
       language_version: 'python3.7'
       args:
       - --py3-plus
+- repo: https://github.com/asottile/pyupgrade
+  rev: v2.1.0
+  hooks:
+  - id: pyupgrade
+    args:
+    - --py3-plus

@@ -1,2 +1 @@
-
 __version__ = '1.27.0dev'

@@ -14,16 +14,16 @@ NAMES = [

 def get_pairs():
     for i, name in enumerate(NAMES):
-        yield(name, str(30 + i))
-        yield('intense_' + name, str(30 + i) + ';1')
+        yield (name, str(30 + i))
+        yield ('intense_' + name, str(30 + i) + ';1')


 def ansi(code):
-    return '\033[{0}m'.format(code)
+    return '\033[{}m'.format(code)


 def ansi_color(code, s):
-    return '{0}{1}{2}'.format(ansi(code), s, ansi(0))
+    return '{}{}{}'.format(ansi(code), s, ansi(0))


 def make_color_fn(code):
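The colors.py change above is safe because empty replacement fields in str.format are numbered automatically, in order; a quick check with invented values:

code = 33
assert '\033[{0}m'.format(code) == '\033[{}m'.format(code)
assert '{0}{1}{2}'.format('a', 'b', 'c') == '{}{}{}'.format('a', 'b', 'c')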
@@ -147,15 +147,17 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,

 def execution_context_labels(config_details, environment_file):
     extra_labels = [
-        '{0}={1}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir))
+        '{}={}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir))
     ]

     if not use_config_from_stdin(config_details):
-        extra_labels.append('{0}={1}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)))
+        extra_labels.append('{}={}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)))

     if environment_file is not None:
-        extra_labels.append('{0}={1}'.format(LABEL_ENVIRONMENT_FILE,
-                            os.path.normpath(environment_file)))
+        extra_labels.append('{}={}'.format(
+            LABEL_ENVIRONMENT_FILE,
+            os.path.normpath(environment_file))
+        )
     return extra_labels


@@ -168,7 +170,8 @@ def use_config_from_stdin(config_details):

 def config_files_label(config_details):
     return ",".join(
-        map(str, (os.path.normpath(c.filename) for c in config_details.config_files)))
+        os.path.normpath(c.filename) for c in config_details.config_files
+    )


 def get_project_name(working_dir, project_name=None, environment=None):

@@ -11,7 +11,7 @@ def docopt_full_help(docstring, *args, **kwargs):
     raise SystemExit(docstring)


-class DocoptDispatcher(object):
+class DocoptDispatcher:

     def __init__(self, command_class, options):
         self.command_class = command_class

@@ -50,7 +50,7 @@ def get_handler(command_class, command):

 class NoSuchCommand(Exception):
     def __init__(self, command, supercommand):
-        super(NoSuchCommand, self).__init__("No such command: %s" % command)
+        super().__init__("No such command: %s" % command)

         self.command = command
         self.supercommand = supercommand

@@ -26,11 +26,9 @@ class UserError(Exception):
     def __init__(self, msg):
         self.msg = dedent(msg).strip()

-    def __unicode__(self):
+    def __str__(self):
         return self.msg

-    __str__ = __unicode__
-

 class ConnectionError(Exception):
     pass
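On Python 3 there is no separate unicode type, so a single __str__ replaces the old __unicode__ plus the __str__ alias; a minimal sketch with an invented message:

class DemoError(Exception):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):      # both str() and print() call this on Python 3
        return self.msg

print(DemoError('something went wrong'))   # something went wrong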
@@ -1,15 +1,10 @@
 import logging
-import shutil
+from shutil import get_terminal_size

 import texttable

 from compose.cli import colors

-if hasattr(shutil, "get_terminal_size"):
-    from shutil import get_terminal_size
-else:
-    from backports.shutil_get_terminal_size import get_terminal_size
-

 def get_tty_width():
     try:
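shutil.get_terminal_size has been in the standard library since Python 3.3, which is why the backports fallback above can be deleted outright; usage sketch:

from shutil import get_terminal_size

columns, lines = get_terminal_size()    # honours the COLUMNS/LINES env vars first
print('terminal is {} columns wide'.format(columns))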
@@ -45,15 +40,15 @@ class ConsoleWarningFormatter(logging.Formatter):

     def get_level_message(self, record):
         separator = ': '
-        if record.levelno == logging.WARNING:
-            return colors.yellow(record.levelname) + separator
-        if record.levelno == logging.ERROR:
+        if record.levelno >= logging.ERROR:
             return colors.red(record.levelname) + separator
+        if record.levelno >= logging.WARNING:
+            return colors.yellow(record.levelname) + separator

         return ''

     def format(self, record):
         if isinstance(record.msg, bytes):
             record.msg = record.msg.decode('utf-8')
-        message = super(ConsoleWarningFormatter, self).format(record)
-        return '{0}{1}'.format(self.get_level_message(record), message)
+        message = super().format(record)
+        return '{}{}'.format(self.get_level_message(record), message)

@@ -2,6 +2,7 @@ import _thread as thread
 import sys
 from collections import namedtuple
 from itertools import cycle
+from operator import attrgetter
 from queue import Empty
 from queue import Queue
 from threading import Thread

@@ -13,7 +14,7 @@ from compose.cli.signals import ShutdownException
 from compose.utils import split_buffer


-class LogPresenter(object):
+class LogPresenter:

     def __init__(self, prefix_width, color_func):
         self.prefix_width = prefix_width

@@ -50,7 +51,7 @@ def max_name_width(service_names, max_index_width=3):
     return max(len(name) for name in service_names) + max_index_width


-class LogPrinter(object):
+class LogPrinter:
     """Print logs from many containers to a single output stream."""

     def __init__(self,

@@ -133,7 +134,7 @@ def build_thread_map(initial_containers, presenters, thread_args):
         # Container order is unspecified, so they are sorted by name in order to make
         # container:presenter (log color) assignment deterministic when given a list of containers
         # with the same names.
-        for container in sorted(initial_containers, key=lambda c: c.name)
+        for container in sorted(initial_containers, key=attrgetter('name'))
     }

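operator.attrgetter builds the same key function as lambda c: c.name; a sketch with invented container records:

from collections import namedtuple
from operator import attrgetter

Container = namedtuple('Container', 'name')
containers = [Container('web_2'), Container('db_1'), Container('web_1')]

assert sorted(containers, key=attrgetter('name')) == sorted(containers, key=lambda c: c.name)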
@@ -194,9 +195,9 @@ def build_log_generator(container, log_args):
 def wait_on_exit(container):
     try:
         exit_code = container.wait()
-        return "%s exited with code %s\n" % (container.name, exit_code)
+        return "{} exited with code {}\n".format(container.name, exit_code)
     except APIError as e:
-        return "Unexpected API error for %s (HTTP code %s)\nResponse body:\n%s\n" % (
+        return "Unexpected API error for {} (HTTP code {})\nResponse body:\n{}\n".format(
             container.name, e.response.status_code,
             e.response.text or '[empty]'
         )

@@ -73,7 +73,7 @@ def main():
         log.error(e.msg)
         sys.exit(1)
     except BuildError as e:
-        log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
+        log.error("Service '{}' failed to build: {}".format(e.service.name, e.reason))
         sys.exit(1)
     except StreamOutputError as e:
         log.error(e)

@@ -175,7 +175,7 @@ def parse_doc_section(name, source):
     return [s.strip() for s in pattern.findall(source)]


-class TopLevelCommand(object):
+class TopLevelCommand:
     """Define and run multi-container applications with Docker.

     Usage:

@@ -546,7 +546,7 @@ class TopLevelCommand(object):
             key=attrgetter('name'))

         if options['--quiet']:
-            for image in set(c.image for c in containers):
+            for image in {c.image for c in containers}:
                 print(image.split(':')[1])
             return

@@ -1130,7 +1130,7 @@ def compute_service_exit_code(exit_value_from, attached_containers):
             attached_containers))
     if not candidates:
         log.error(
-            'No containers matching the spec "{0}" '
+            'No containers matching the spec "{}" '
             'were run.'.format(exit_value_from)
         )
         return 2

@@ -1453,10 +1453,7 @@ def call_docker(args, dockeropts, environment):
     args = [executable_path] + tls_options + args
     log.debug(" ".join(map(pipes.quote, args)))

-    filtered_env = {}
-    for k, v in environment.items():
-        if v is not None:
-            filtered_env[k] = environment[k]
+    filtered_env = {k: v for k, v in environment.items() if v is not None}

     return subprocess.call(args, env=filtered_env)

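The four-line filtering loop in call_docker collapses into one dict comprehension with identical behaviour; a check with invented values:

environment = {'DOCKER_HOST': 'unix://', 'DOCKER_TLS_VERIFY': None, 'PATH': '/usr/bin'}
filtered_env = {k: v for k, v in environment.items() if v is not None}
assert filtered_env == {'DOCKER_HOST': 'unix://', 'PATH': '/usr/bin'}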
@@ -11,13 +11,6 @@ import docker
 import compose
 from ..const import IS_WINDOWS_PLATFORM

-# WindowsError is not defined on non-win32 platforms. Avoid runtime errors by
-# defining it as OSError (its parent class) if missing.
-try:
-    WindowsError
-except NameError:
-    WindowsError = OSError
-

 def yesno(prompt, default=None):
     """

@@ -58,7 +51,7 @@ def call_silently(*args, **kwargs):
     with open(os.devnull, 'w') as shutup:
         try:
             return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs)
-        except WindowsError:
+        except OSError:
             # On Windows, subprocess.call() can still raise exceptions. Normalize
             # to POSIXy behaviour by returning a nonzero exit code.
             return 1
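Catching OSError works on every platform because PEP 3151 (Python 3.3) folded IOError, and WindowsError where it exists, into the OSError hierarchy:

assert IOError is OSError     # aliases since Python 3.3

try:
    open('/nonexistent/path')
except OSError as e:          # also catches what used to be IOError/WindowsError
    print(type(e).__name__, e.errno)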
@@ -120,7 +113,7 @@ def generate_user_agent():
     try:
         p_system = platform.system()
         p_release = platform.release()
-    except IOError:
+    except OSError:
         pass
     else:
         parts.append("{}/{}".format(p_system, p_release))

@@ -133,7 +126,7 @@ def human_readable_file_size(size):
     if order >= len(suffixes):
         order = len(suffixes) - 1

-    return '{0:.4g} {1}'.format(
+    return '{:.4g} {}'.format(
         size / pow(10, order * 3),
         suffixes[order]
     )

@@ -6,13 +6,13 @@ from itertools import chain

 def format_call(args, kwargs):
     args = (repr(a) for a in args)
-    kwargs = ("{0!s}={1!r}".format(*item) for item in kwargs.items())
-    return "({0})".format(", ".join(chain(args, kwargs)))
+    kwargs = ("{!s}={!r}".format(*item) for item in kwargs.items())
+    return "({})".format(", ".join(chain(args, kwargs)))


 def format_return(result, max_lines):
     if isinstance(result, (list, tuple, set)):
-        return "({0} with {1} items)".format(type(result).__name__, len(result))
+        return "({} with {} items)".format(type(result).__name__, len(result))

     if result:
         lines = pprint.pformat(result).split('\n')

@@ -22,7 +22,7 @@ def format_return(result, max_lines):
     return result


-class VerboseProxy(object):
+class VerboseProxy:
     """Proxy all function calls to another class and log method name, arguments
     and return values for each call.
     """

@@ -1,12 +1,13 @@
 import functools
 import io
 import logging
 import os
 import re
 import string
 import sys
 from collections import namedtuple
 from itertools import chain
+from operator import attrgetter
 from operator import itemgetter

 import yaml
 from cached_property import cached_property

@@ -166,7 +167,7 @@ class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files envir
     def __new__(cls, working_dir, config_files, environment=None):
         if environment is None:
             environment = Environment.from_env_file(working_dir)
-        return super(ConfigDetails, cls).__new__(
+        return super().__new__(
             cls, working_dir, config_files, environment
         )

@@ -315,8 +316,8 @@ def validate_config_version(config_files):

         if main_file.version != next_file.version:
             raise ConfigurationError(
-                "Version mismatch: file {0} specifies version {1} but "
-                "extension file {2} uses version {3}".format(
+                "Version mismatch: file {} specifies version {} but "
+                "extension file {} uses version {}".format(
                     main_file.filename,
                     main_file.version,
                     next_file.filename,

@@ -595,7 +596,7 @@ def process_config_file(config_file, environment, service_name=None, interpolate
     return config_file


-class ServiceExtendsResolver(object):
+class ServiceExtendsResolver:
     def __init__(self, service_config, config_file, environment, already_seen=None):
         self.service_config = service_config
         self.working_dir = service_config.working_dir

@@ -703,7 +704,7 @@ def resolve_build_args(buildargs, environment):


 def validate_extended_service_dict(service_dict, filename, service):
-    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
+    error_prefix = "Cannot extend service '{}' in {}:".format(service, filename)

     if 'links' in service_dict:
         raise ConfigurationError(

@@ -826,9 +827,9 @@ def process_ports(service_dict):

 def process_depends_on(service_dict):
     if 'depends_on' in service_dict and not isinstance(service_dict['depends_on'], dict):
-        service_dict['depends_on'] = dict([
-            (svc, {'condition': 'service_started'}) for svc in service_dict['depends_on']
-        ])
+        service_dict['depends_on'] = {
+            svc: {'condition': 'service_started'} for svc in service_dict['depends_on']
+        }
     return service_dict

@@ -1071,9 +1072,9 @@ def merge_service_dicts(base, override, version):


 def merge_unique_items_lists(base, override):
-    override = [str(o) for o in override]
-    base = [str(b) for b in base]
-    return sorted(set().union(base, override))
+    override = (str(o) for o in override)
+    base = (str(b) for b in base)
+    return sorted(set(chain(base, override)))


 def merge_healthchecks(base, override):
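set(chain(a, b)) accepts the generator expressions directly, so no intermediate lists are built before deduplication; equivalence sketch:

from itertools import chain

base = (str(b) for b in [1, 2, 3])
override = (str(o) for o in [3, 4])
assert sorted(set(chain(base, override))) == ['1', '2', '3', '4']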
@@ -1086,9 +1087,7 @@ def merge_healthchecks(base, override):

 def merge_ports(md, base, override):
     def parse_sequence_func(seq):
-        acc = []
-        for item in seq:
-            acc.extend(ServicePort.parse(item))
+        acc = [s for item in seq for s in ServicePort.parse(item)]
         return to_mapping(acc, 'merge_field')

     field = 'ports'

@@ -1098,7 +1097,7 @@ def merge_ports(md, base, override):

     merged = parse_sequence_func(md.base.get(field, []))
     merged.update(parse_sequence_func(md.override.get(field, [])))
-    md[field] = [item for item in sorted(merged.values(), key=lambda x: x.target)]
+    md[field] = [item for item in sorted(merged.values(), key=attrgetter("target"))]


 def merge_build(output, base, override):

@@ -1170,8 +1169,8 @@ def merge_reservations(base, override):


 def merge_unique_objects_lists(base, override):
-    result = dict((json_hash(i), i) for i in base + override)
-    return [i[1] for i in sorted([(k, v) for k, v in result.items()], key=lambda x: x[0])]
+    result = {json_hash(i): i for i in base + override}
+    return [i[1] for i in sorted(((k, v) for k, v in result.items()), key=itemgetter(0))]


 def merge_blkio_config(base, override):

@@ -1179,11 +1178,11 @@ def merge_blkio_config(base, override):
     md.merge_scalar('weight')

     def merge_blkio_limits(base, override):
-        index = dict((b['path'], b) for b in base)
-        for o in override:
-            index[o['path']] = o
+        get_path = itemgetter('path')
+        index = {get_path(b): b for b in base}
+        index.update((get_path(o), o) for o in override)

-        return sorted(list(index.values()), key=lambda x: x['path'])
+        return sorted(index.values(), key=get_path)

     for field in [
         "device_read_bps", "device_read_iops", "device_write_bps",

@@ -1304,7 +1303,7 @@ def resolve_volume_path(working_dir, volume):
         if host_path.startswith('.'):
             host_path = expand_path(working_dir, host_path)
         host_path = os.path.expanduser(host_path)
-        return u"{}:{}{}".format(host_path, container_path, (':' + mode if mode else ''))
+        return "{}:{}{}".format(host_path, container_path, (':' + mode if mode else ''))

     return container_path

@@ -1447,13 +1446,13 @@ def has_uppercase(name):

 def load_yaml(filename, encoding=None, binary=True):
     try:
-        with io.open(filename, 'rb' if binary else 'r', encoding=encoding) as fh:
+        with open(filename, 'rb' if binary else 'r', encoding=encoding) as fh:
             return yaml.safe_load(fh)
-    except (IOError, yaml.YAMLError, UnicodeDecodeError) as e:
+    except (OSError, yaml.YAMLError, UnicodeDecodeError) as e:
         if encoding is None:
            # Sometimes the user's locale sets an encoding that doesn't match
            # the YAML files. In such cases, retry once with the "default"
            # UTF-8 encoding
            return load_yaml(filename, encoding='utf-8-sig', binary=False)
        error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
-        raise ConfigurationError(u"{}: {}".format(error_name, e))
+        raise ConfigurationError("{}: {}".format(error_name, e))
@@ -43,7 +43,7 @@ def env_vars_from_file(filename, interpolate=True):

 class Environment(dict):
     def __init__(self, *args, **kwargs):
-        super(Environment, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.missing_keys = []
         self.silent = False

@@ -81,11 +81,11 @@ class Environment(dict):

     def __getitem__(self, key):
         try:
-            return super(Environment, self).__getitem__(key)
+            return super().__getitem__(key)
         except KeyError:
             if IS_WINDOWS_PLATFORM:
                 try:
-                    return super(Environment, self).__getitem__(key.upper())
+                    return super().__getitem__(key.upper())
                 except KeyError:
                     pass
             if not self.silent and key not in self.missing_keys:

@@ -98,20 +98,20 @@ class Environment(dict):
         return ""

     def __contains__(self, key):
-        result = super(Environment, self).__contains__(key)
+        result = super().__contains__(key)
         if IS_WINDOWS_PLATFORM:
             return (
-                result or super(Environment, self).__contains__(key.upper())
+                result or super().__contains__(key.upper())
             )
         return result

     def get(self, key, *args, **kwargs):
         if IS_WINDOWS_PLATFORM:
-            return super(Environment, self).get(
+            return super().get(
                 key,
-                super(Environment, self).get(key.upper(), *args, **kwargs)
+                super().get(key.upper(), *args, **kwargs)
             )
-        return super(Environment, self).get(key, *args, **kwargs)
+        return super().get(key, *args, **kwargs)

     def get_boolean(self, key):
         # Convert a value to a boolean using "common sense" rules.
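Inside a method body, Python 3's zero-argument super() resolves the enclosing class and instance implicitly, which is what lets every super(Environment, self) call above shrink; sketch with a simplified stand-in for the real class:

class Environment(dict):
    def get(self, key, *args, **kwargs):
        # same as super(Environment, self).get(...) on Python 3
        return super().get(key.upper(), *args, **kwargs)

env = Environment(PATH='/usr/bin')
assert env.get('path') == '/usr/bin'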
@@ -1,5 +1,3 @@
-
-
 VERSION_EXPLANATION = (
     'You might be seeing this error because you\'re using the wrong Compose file version. '
     'Either specify a supported version (e.g "2.2" or "3.3") and place '

@@ -40,7 +38,7 @@ class CircularReference(ConfigurationError):

 class ComposeFileNotFound(ConfigurationError):
     def __init__(self, supported_filenames):
-        super(ComposeFileNotFound, self).__init__("""
+        super().__init__("""
         Can't find a suitable configuration file in this directory or any
         parent. Are you in the right directory?

@@ -51,7 +49,7 @@ class ComposeFileNotFound(ConfigurationError):
 class DuplicateOverrideFileFound(ConfigurationError):
     def __init__(self, override_filenames):
         self.override_filenames = override_filenames
-        super(DuplicateOverrideFileFound, self).__init__(
+        super().__init__(
             "Multiple override files found: {}. You may only use a single "
             "override file.".format(", ".join(override_filenames))
         )

@@ -11,7 +11,7 @@ from compose.utils import parse_nanoseconds_int
 log = logging.getLogger(__name__)


-class Interpolator(object):
+class Interpolator:

     def __init__(self, templater, mapping):
         self.templater = templater

@@ -31,15 +31,15 @@ def interpolate_environment_variables(version, config, section, environment):
     interpolator = Interpolator(TemplateWithDefaults, environment)

     def process_item(name, config_dict):
-        return dict(
-            (key, interpolate_value(name, key, val, section, interpolator))
+        return {
+            key: interpolate_value(name, key, val, section, interpolator)
             for key, val in (config_dict or {}).items()
-        )
+        }

-    return dict(
-        (name, process_item(name, config_dict or {}))
+    return {
+        name: process_item(name, config_dict or {})
         for name, config_dict in config.items()
-    )
+    }


 def get_config_path(config_key, section, name):

@@ -75,10 +75,10 @@ def recursive_interpolate(obj, interpolator, config_path):
     if isinstance(obj, str):
         return converter.convert(config_path, interpolator.interpolate(obj))
     if isinstance(obj, dict):
-        return dict(
-            (key, recursive_interpolate(val, interpolator, append(config_path, key)))
-            for (key, val) in obj.items()
-        )
+        return {
+            key: recursive_interpolate(val, interpolator, append(config_path, key))
+            for key, val in obj.items()
+        }
     if isinstance(obj, list):
         return [recursive_interpolate(val, interpolator, config_path) for val in obj]
     return converter.convert(config_path, obj)

@@ -135,7 +135,7 @@ class TemplateWithDefaults(Template):
             val = mapping[named]
             if isinstance(val, bytes):
                 val = val.decode('utf-8')
-            return '%s' % (val,)
+            return '{}'.format(val)
         if mo.group('escaped') is not None:
             return self.delimiter
         if mo.group('invalid') is not None:

@@ -224,7 +224,7 @@ def to_microseconds(v):
     return int(parse_nanoseconds_int(v) / 1000)


-class ConversionMap(object):
+class ConversionMap:
     map = {
         service_path('blkio_config', 'weight'): to_int,
         service_path('blkio_config', 'weight_device', 'weight'): to_int,

@@ -104,7 +104,7 @@ def serialize_ns_time_value(value):
             result = (int(value), stage[1])
         else:
             break
-    return '{0}{1}'.format(*result)
+    return '{}{}'.format(*result)


 def denormalize_service_dict(service_dict, version, image_digest=None):

@@ -21,7 +21,7 @@ def get_source_name_from_network_mode(network_mode, source_type):


 def get_service_names(links):
-    return [link.split(':')[0] for link in links]
+    return [link.split(':', 1)[0] for link in links]


 def get_service_names_from_volumes_from(volumes_from):

@@ -146,7 +146,7 @@ def normpath(path, win_host=False):
     return path


-class MountSpec(object):
+class MountSpec:
     options_map = {
         'volume': {
             'nocopy': 'no_copy'

@@ -338,9 +338,9 @@ class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid
         return self.source

     def repr(self):
-        return dict(
-            [(k, v) for k, v in zip(self._fields, self) if v is not None]
-        )
+        return {
+            k: v for k, v in zip(self._fields, self) if v is not None
+        }


 class ServiceSecret(ServiceConfigBase):

@@ -362,10 +362,7 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
         if published:
             if isinstance(published, str) and '-' in published:  # "x-y:z" format
                 a, b = published.split('-', 1)
-                try:
-                    int(a)
-                    int(b)
-                except ValueError:
+                if not a.isdigit() or not b.isdigit():
                     raise ConfigurationError('Invalid published port: {}'.format(published))
             else:
                 try:

@@ -373,7 +370,7 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
             except ValueError:
                 raise ConfigurationError('Invalid published port: {}'.format(published))

-        return super(ServicePort, cls).__new__(
+        return super().__new__(
             cls, target, published, *args, **kwargs
         )

@@ -422,9 +419,9 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
         return (self.target, self.published, self.external_ip, self.protocol)

     def repr(self):
-        return dict(
-            [(k, v) for k, v in zip(self._fields, self) if v is not None]
-        )
+        return {
+            k: v for k, v in zip(self._fields, self) if v is not None
+        }

     def legacy_repr(self):
         return normalize_port_dict(self.repr())

@@ -484,9 +481,9 @@ class SecurityOpt(namedtuple('_SecurityOpt', 'value src_file')):

         if con[0] == 'seccomp' and con[1] != 'unconfined':
             try:
-                with open(unquote_path(con[1]), 'r') as f:
+                with open(unquote_path(con[1])) as f:
                     seccomp_data = json.load(f)
-            except (IOError, ValueError) as e:
+            except (OSError, ValueError) as e:
                 raise ConfigurationError('Error reading seccomp profile: {}'.format(e))
             return cls(
                 'seccomp={}'.format(json.dumps(seccomp_data)), con[1]
@@ -100,7 +100,7 @@ def match_named_volumes(service_dict, project_volumes):
     for volume_spec in service_volumes:
         if volume_spec.is_named_volume and volume_spec.external not in project_volumes:
             raise ConfigurationError(
-                'Named volume "{0}" is used in service "{1}" but no'
+                'Named volume "{}" is used in service "{}" but no'
                 ' declaration was found in the volumes section.'.format(
                     volume_spec.repr(), service_dict.get('name')
                 )

@@ -508,13 +508,13 @@ def load_jsonschema(version):

     filename = os.path.join(
         get_schema_path(),
-        "config_schema_{0}.json".format(suffix))
+        "config_schema_{}.json".format(suffix))

     if not os.path.exists(filename):
         raise ConfigurationError(
             'Version in "{}" is unsupported. {}'
             .format(filename, VERSION_EXPLANATION))
-    with open(filename, "r") as fh:
+    with open(filename) as fh:
         return json.load(fh)


@@ -534,7 +534,7 @@ def handle_errors(errors, format_error_func, filename):
     gone wrong. Process each error and pull out relevant information and re-write
     helpful error messages that are relevant.
     """
-    errors = list(sorted(errors, key=str))
+    errors = sorted(errors, key=str)
     if not errors:
         return

@@ -12,7 +12,7 @@ from .utils import truncate_id
 from .version import ComposeVersion


-class Container(object):
+class Container:
     """
     Represents a Docker container, constructed from the output of
     GET /containers/:id:/json.

@@ -78,8 +78,8 @@ class Container(object):

     @property
     def name_without_project(self):
-        if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
-            return '{0}_{1}'.format(self.service, self.number if self.number is not None else self.slug)
+        if self.name.startswith('{}_{}'.format(self.project, self.service)):
+            return '{}_{}'.format(self.service, self.number if self.number is not None else self.slug)
         else:
             return self.name

@@ -91,7 +91,7 @@ class Container(object):

         number = self.labels.get(LABEL_CONTAINER_NUMBER)
         if not number:
-            raise ValueError("Container {0} does not have a {1} label".format(
+            raise ValueError("Container {} does not have a {} label".format(
                 self.short_id, LABEL_CONTAINER_NUMBER))
         return int(number)

@@ -224,7 +224,7 @@ class Container(object):
         return reduce(get_value, key.split('.'), self.dictionary)

     def get_local_port(self, port, protocol='tcp'):
-        port = self.ports.get("%s/%s" % (port, protocol))
+        port = self.ports.get("{}/{}".format(port, protocol))
         return "{HostIp}:{HostPort}".format(**port[0]) if port else None

     def get_mount(self, mount_dest):

@@ -266,7 +266,7 @@ class Container(object):
         """
         if not self.name.startswith(self.short_id):
             self.client.rename(
-                self.id, '{0}_{1}'.format(self.short_id, self.name)
+                self.id, '{}_{}'.format(self.short_id, self.name)
             )

     def inspect_if_not_inspected(self):

@@ -309,7 +309,7 @@ class Container(object):
         )

     def __repr__(self):
-        return '<Container: %s (%s)>' % (self.name, self.id[:6])
+        return '<Container: {} ({})>'.format(self.name, self.id[:6])

     def __eq__(self, other):
         if type(self) != type(other):

@@ -1,5 +1,3 @@
-
-
 class OperationFailedError(Exception):
     def __init__(self, reason):
         self.msg = reason

@@ -17,14 +15,14 @@ class HealthCheckException(Exception):

 class HealthCheckFailed(HealthCheckException):
     def __init__(self, container_id):
-        super(HealthCheckFailed, self).__init__(
+        super().__init__(
             'Container "{}" is unhealthy.'.format(container_id)
         )


 class NoHealthCheckConfigured(HealthCheckException):
     def __init__(self, service_name):
-        super(NoHealthCheckConfigured, self).__init__(
+        super().__init__(
             'Service "{}" is missing a healthcheck configuration'.format(
                 service_name
             )

@@ -1,6 +1,7 @@
 import logging
 import re
 from collections import OrderedDict
+from operator import itemgetter

 from docker.errors import NotFound
 from docker.types import IPAMConfig

@@ -24,7 +25,7 @@ OPTS_EXCEPTIONS = [
 ]


-class Network(object):
+class Network:
     def __init__(self, client, project, name, driver=None, driver_opts=None,
                  ipam=None, external=False, internal=False, enable_ipv6=False,
                  labels=None, custom_name=False):

@@ -51,7 +52,7 @@ class Network(object):
         try:
             self.inspect()
             log.debug(
-                'Network {0} declared as external. No new '
+                'Network {} declared as external. No new '
                 'network will be created.'.format(self.name)
             )
         except NotFound:

@@ -107,7 +108,7 @@ class Network(object):
     def legacy_full_name(self):
         if self.custom_name:
             return self.name
-        return '{0}_{1}'.format(
+        return '{}_{}'.format(
             re.sub(r'[_-]', '', self.project), self.name
         )

@@ -115,7 +116,7 @@ class Network(object):
     def full_name(self):
         if self.custom_name:
             return self.name
-        return '{0}_{1}'.format(self.project, self.name)
+        return '{}_{}'.format(self.project, self.name)

     @property
     def true_name(self):

@@ -167,7 +168,7 @@ def create_ipam_config_from_dict(ipam_dict):

 class NetworkConfigChangedError(ConfigurationError):
     def __init__(self, net_name, property_name):
-        super(NetworkConfigChangedError, self).__init__(
+        super().__init__(
             'Network "{}" needs to be recreated - {} has changed'.format(
                 net_name, property_name
             )

@@ -258,7 +259,7 @@ def build_networks(name, config_data, client):
     return networks


-class ProjectNetworks(object):
+class ProjectNetworks:

     def __init__(self, networks, use_networking):
         self.networks = networks or {}

@@ -299,10 +300,10 @@ def get_network_defs_for_service(service_dict):
     if 'network_mode' in service_dict:
         return {}
     networks = service_dict.get('networks', {'default': None})
-    return dict(
-        (net, (config or {}))
+    return {
+        net: (config or {})
         for net, config in networks.items()
-    )
+    }


 def get_network_names_for_service(service_dict):

@@ -328,4 +329,4 @@ def get_networks(service_dict, network_definitions):
     else:
         # Ensure Compose will pick a consistent primary network if no
         # priority is set
-        return OrderedDict(sorted(networks.items(), key=lambda t: t[0]))
+        return OrderedDict(sorted(networks.items(), key=itemgetter(0)))

@@ -25,7 +25,7 @@ log = logging.getLogger(__name__)
 STOP = object()


-class GlobalLimit(object):
+class GlobalLimit:
     """Simple class to hold a global semaphore limiter for a project. This class
     should be treated as a singleton that is instantiated when the project is.
     """

@@ -114,7 +114,7 @@ def _no_deps(x):
     return []


-class State(object):
+class State:
     """
     Holds the state of a partially-complete parallel operation.

@@ -136,7 +136,7 @@ class State(object):
         return set(self.objects) - self.started - self.finished - self.failed


-class NoLimit(object):
+class NoLimit:
     def __enter__(self):
         pass

@@ -252,7 +252,7 @@ class UpstreamError(Exception):
     pass


-class ParallelStreamWriter(object):
+class ParallelStreamWriter:
     """Write out messages for operations happening in parallel.

     Each operation has its own line, and ANSI code characters are used

@@ -79,19 +79,19 @@ def print_output_event(event, stream, is_terminal):
     status = event.get('status', '')

     if 'progress' in event:
-        write_to_stream("%s %s%s" % (status, event['progress'], terminator), stream)
+        write_to_stream("{} {}{}".format(status, event['progress'], terminator), stream)
     elif 'progressDetail' in event:
         detail = event['progressDetail']
         total = detail.get('total')
         if 'current' in detail and total:
             percentage = float(detail['current']) / float(total) * 100
-            write_to_stream('%s (%.1f%%)%s' % (status, percentage, terminator), stream)
+            write_to_stream('{} ({:.1f}%){}'.format(status, percentage, terminator), stream)
         else:
-            write_to_stream('%s%s' % (status, terminator), stream)
+            write_to_stream('{}{}'.format(status, terminator), stream)
     elif 'stream' in event:
-        write_to_stream("%s%s" % (event['stream'], terminator), stream)
+        write_to_stream("{}{}".format(event['stream'], terminator), stream)
     else:
-        write_to_stream("%s%s\n" % (status, terminator), stream)
+        write_to_stream("{}{}\n".format(status, terminator), stream)


 def get_digest_from_pull(events):
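Each % interpolation above has a direct str.format equivalent, including the float precision specifier and the escaped percent sign; check with invented values:

status, percentage, terminator = 'Downloading', 41.666, '\r'
old = '%s (%.1f%%)%s' % (status, percentage, terminator)
new = '{} ({:.1f}%){}'.format(status, percentage, terminator)
assert old == new == 'Downloading (41.7%)\r'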
@@ -55,16 +55,16 @@ class OneOffFilter(enum.Enum):
     @classmethod
     def update_labels(cls, value, labels):
         if value == cls.only:
-            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True"))
+            labels.append('{}={}'.format(LABEL_ONE_OFF, "True"))
         elif value == cls.exclude:
-            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False"))
+            labels.append('{}={}'.format(LABEL_ONE_OFF, "False"))
         elif value == cls.include:
             pass
         else:
             raise ValueError("Invalid value for one_off: {}".format(repr(value)))


-class Project(object):
+class Project:
     """
     A collection of services.
     """

@@ -80,7 +80,7 @@ class Project(object):
         name = self.name
         if legacy:
             name = re.sub(r'[_-]', '', name)
-        labels = ['{0}={1}'.format(LABEL_PROJECT, name)]
+        labels = ['{}={}'.format(LABEL_PROJECT, name)]

         OneOffFilter.update_labels(one_off, labels)
         return labels

@@ -549,10 +549,10 @@ class Project(object):
             'action': event['status'],
             'id': event['Actor']['ID'],
             'service': container_attrs.get(LABEL_SERVICE),
-            'attributes': dict([
-                (k, v) for k, v in container_attrs.items()
+            'attributes': {
+                k: v for k, v in container_attrs.items()
                 if not k.startswith('com.docker.compose.')
-            ]),
+            },
             'container': container,
         }

@@ -812,7 +812,7 @@ class Project(object):
             return
         if remove_orphans:
             for ctnr in orphans:
-                log.info('Removing orphan container "{0}"'.format(ctnr.name))
+                log.info('Removing orphan container "{}"'.format(ctnr.name))
                 try:
                     ctnr.kill()
                 except APIError:

@@ -820,7 +820,7 @@ class Project(object):
                     ctnr.remove(force=True)
         else:
             log.warning(
-                'Found orphan containers ({0}) for this project. If '
+                'Found orphan containers ({}) for this project. If '
                 'you removed or renamed this service in your compose '
                 'file, you can run this command with the '
                 '--remove-orphans flag to clean it up.'.format(

@@ -966,16 +966,16 @@ def get_secrets(service, service_secrets, secret_defs):
                 .format(service=service, secret=secret.source))

         if secret_def.get('external'):
-            log.warning("Service \"{service}\" uses secret \"{secret}\" which is external. "
-                        "External secrets are not available to containers created by "
-                        "docker-compose.".format(service=service, secret=secret.source))
+            log.warning('Service "{service}" uses secret "{secret}" which is external. '
+                        'External secrets are not available to containers created by '
+                        'docker-compose.'.format(service=service, secret=secret.source))
             continue

         if secret.uid or secret.gid or secret.mode:
             log.warning(
-                "Service \"{service}\" uses secret \"{secret}\" with uid, "
-                "gid, or mode. These fields are not supported by this "
-                "implementation of the Compose file".format(
+                'Service "{service}" uses secret "{secret}" with uid, '
+                'gid, or mode. These fields are not supported by this '
+                'implementation of the Compose file'.format(
                     service=service, secret=secret.source
                 )
             )

@@ -983,8 +983,8 @@ def get_secrets(service, service_secrets, secret_defs):
         secret_file = secret_def.get('file')
         if not path.isfile(str(secret_file)):
             log.warning(
-                "Service \"{service}\" uses an undefined secret file \"{secret_file}\", "
-                "the following file should be created \"{secret_file}\"".format(
+                'Service "{service}" uses an undefined secret file "{secret_file}", '
+                'the following file should be created "{secret_file}"'.format(
                     service=service, secret_file=secret_file
                 )
             )

@@ -163,7 +163,7 @@ class BuildAction(enum.Enum):
     skip = 2


-class Service(object):
+class Service:
     def __init__(
         self,
         name,

@@ -230,10 +230,10 @@ class Service(object):
         """Return a :class:`compose.container.Container` for this service. The
         container must be active, and match `number`.
         """
-        for container in self.containers(labels=['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]):
+        for container in self.containers(labels=['{}={}'.format(LABEL_CONTAINER_NUMBER, number)]):
             return container

-        raise ValueError("No container found for %s_%s" % (self.name, number))
+        raise ValueError("No container found for {}_{}".format(self.name, number))

     def start(self, **options):
         containers = self.containers(stopped=True)

@@ -642,7 +642,7 @@ class Service(object):
             expl = binarystr_to_unicode(ex.explanation)
             if "driver failed programming external connectivity" in expl:
                 log.warn("Host is already in use by another container")
-            raise OperationFailedError("Cannot start service %s: %s" % (self.name, expl))
+            raise OperationFailedError("Cannot start service {}: {}".format(self.name, expl))
         return container

     @property

@@ -736,12 +736,12 @@ class Service(object):
         pid_namespace = self.pid_mode.service_name
         ipc_namespace = self.ipc_mode.service_name

-        configs = dict(
-            [(name, None) for name in self.get_linked_service_names()]
-        )
-        configs.update(dict(
-            [(name, None) for name in self.get_volumes_from_names()]
-        ))
+        configs = {
+            name: None for name in self.get_linked_service_names()
+        }
+        configs.update(
+            (name, None) for name in self.get_volumes_from_names()
+        )
         configs.update({net_name: None} if net_name else {})
         configs.update({pid_namespace: None} if pid_namespace else {})
         configs.update({ipc_namespace: None} if ipc_namespace else {})

@@ -863,9 +863,9 @@ class Service(object):
         add_config_hash = (not one_off and not override_options)
         slug = generate_random_id() if one_off else None

-        container_options = dict(
-            (k, self.options[k])
-            for k in DOCKER_CONFIG_KEYS if k in self.options)
+        container_options = {
+            k: self.options[k]
+            for k in DOCKER_CONFIG_KEYS if k in self.options}
         override_volumes = override_options.pop('volumes', [])
         container_options.update(override_options)

@@ -957,7 +957,7 @@ class Service(object):
             )
             container_options['environment'].update(affinity)

-        container_options['volumes'] = dict((v.internal, {}) for v in container_volumes or {})
+        container_options['volumes'] = {v.internal: {} for v in container_volumes or {}}
         if version_gte(self.client.api_version, '1.30'):
             override_options['mounts'] = [build_mount(v) for v in container_mounts] or None
         else:

@@ -1159,9 +1159,9 @@ class Service(object):
     def labels(self, one_off=False, legacy=False):
         proj_name = self.project if not legacy else re.sub(r'[_-]', '', self.project)
         return [
-            '{0}={1}'.format(LABEL_PROJECT, proj_name),
-            '{0}={1}'.format(LABEL_SERVICE, self.name),
-            '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
+            '{}={}'.format(LABEL_PROJECT, proj_name),
+            '{}={}'.format(LABEL_SERVICE, self.name),
+            '{}={}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
         ]

     @property

@@ -1178,7 +1178,7 @@ class Service(object):
         ext_links_origins = [link.split(':')[0] for link in self.options.get('external_links', [])]
         if container_name in ext_links_origins:
             raise DependencyError(
-                'Service {0} has a self-referential external link: {1}'.format(
+                'Service {} has a self-referential external link: {}'.format(
                     self.name, container_name
                 )
             )

@@ -1233,11 +1233,9 @@ class Service(object):
             output = self.client.pull(repo, **pull_kwargs)
             if silent:
                 with open(os.devnull, 'w') as devnull:
-                    for event in stream_output(output, devnull):
-                        yield event
+                    yield from stream_output(output, devnull)
             else:
-                for event in stream_output(output, sys.stdout):
-                    yield event
+                yield from stream_output(output, sys.stdout)
         except (StreamOutputError, NotFound) as e:
             if not ignore_pull_failures:
                 raise
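yield from (Python 3.3, PEP 380) delegates to a sub-iterator and replaces the simple re-yield loop one-for-one; sketch with an invented stand-in for stream_output:

def stream_output(output, stream):   # stand-in: just re-emit events
    for event in output:
        yield event

def pull_old(output):
    for event in stream_output(output, None):
        yield event

def pull_new(output):
    yield from stream_output(output, None)

assert list(pull_old([1, 2])) == list(pull_new([1, 2])) == [1, 2]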
@@ -1255,7 +1253,7 @@ class Service(object):
             'platform': self.platform,
         }
         if not silent:
-            log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
+            log.info('Pulling {} ({}{}{})...'.format(self.name, repo, separator, tag))

         if kwargs['platform'] and version_lt(self.client.api_version, '1.35'):
             raise OperationFailedError(

@@ -1273,7 +1271,7 @@ class Service(object):

         repo, tag, separator = parse_repository_tag(self.options['image'])
         tag = tag or 'latest'
-        log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
+        log.info('Pushing {} ({}{}{})...'.format(self.name, repo, separator, tag))
         output = self.client.push(repo, tag=tag, stream=True)

         try:

@@ -1335,7 +1333,7 @@ def short_id_alias_exists(container, network):
     return container.short_id in aliases


-class IpcMode(object):
+class IpcMode:
     def __init__(self, mode):
         self._mode = mode

@@ -1375,7 +1373,7 @@ class ContainerIpcMode(IpcMode):
         self._mode = 'container:{}'.format(container.id)


-class PidMode(object):
+class PidMode:
     def __init__(self, mode):
         self._mode = mode

@@ -1415,7 +1413,7 @@ class ContainerPidMode(PidMode):
         self._mode = 'container:{}'.format(container.id)


-class NetworkMode(object):
+class NetworkMode:
     """A `standard` network mode (ex: host, bridge)"""

     service_name = None

@@ -1430,7 +1428,7 @@ class NetworkMode(object):
     mode = id


-class ContainerNetworkMode(object):
+class ContainerNetworkMode:
     """A network mode that uses a container's network stack."""

     service_name = None

@@ -1447,7 +1445,7 @@ class ContainerNetworkMode(object):
         return 'container:' + self.container.id


-class ServiceNetworkMode(object):
+class ServiceNetworkMode:
     """A network mode that uses a service's network stack."""

     def __init__(self, service):

@@ -1552,10 +1550,10 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o
     volumes = []
     volumes_option = volumes_option or []

-    container_mounts = dict(
-        (mount['Destination'], mount)
+    container_mounts = {
+        mount['Destination']: mount
         for mount in container.get('Mounts') or {}
-    )
+    }

     image_volumes = [
         VolumeSpec.parse(volume)

@@ -1607,9 +1605,9 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o


 def warn_on_masked_volume(volumes_option, container_volumes, service):
-    container_volumes = dict(
-        (volume.internal, volume.external)
-        for volume in container_volumes)
+    container_volumes = {
+        volume.internal: volume.external
+        for volume in container_volumes}

     for volume in volumes_option:
         if (

@@ -1759,7 +1757,7 @@ def convert_blkio_config(blkio_config):
             continue
         arr = []
         for item in blkio_config[field]:
-            arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
+            arr.append({k.capitalize(): v for k, v in item.items()})
         result[field] = arr
     return result

@@ -1771,7 +1769,7 @@ def rewrite_build_path(path):
     return path


-class _CLIBuilder(object):
+class _CLIBuilder:
     def __init__(self, progress):
         self._progress = progress

@@ -1879,7 +1877,7 @@ class _CLIBuilder(object):
         yield json.dumps({"stream": "{}{}\n".format(magic_word, image_id)})


-class _CommandBuilder(object):
+class _CommandBuilder:
     def __init__(self):
         self._args = ["docker", "build"]

@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 '''
 timeparse.py
 (c) Will Roberts <wildwilhelm@gmail.com> 1 February, 2014

@@ -54,14 +53,14 @@ TIMEFORMAT = r'{HOURS}{MINS}{SECS}{MILLI}{MICRO}{NANO}'.format(
     NANO=opt(NANO),
 )

-MULTIPLIERS = dict([
-    ('hours', 60 * 60),
-    ('mins', 60),
-    ('secs', 1),
-    ('milli', 1.0 / 1000),
-    ('micro', 1.0 / 1000.0 / 1000),
-    ('nano', 1.0 / 1000.0 / 1000.0 / 1000.0),
-])
+MULTIPLIERS = {
+    'hours': 60 * 60,
+    'mins': 60,
+    'secs': 1,
+    'milli': 1.0 / 1000,
+    'micro': 1.0 / 1000.0 / 1000,
+    'nano': 1.0 / 1000.0 / 1000.0 / 1000.0,
+}


 def timeparse(sval):

@@ -90,4 +89,4 @@ def timeparse(sval):


 def cast(value):
-    return int(value, 10) if value.isdigit() else float(value)
+    return int(value) if value.isdigit() else float(value)
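int() already defaults to base 10 for string input, so the explicit base argument in cast was redundant:

value = '42'
assert int(value, 10) == int(value) == 42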
@@ -29,7 +29,7 @@ def stream_as_text(stream):
         yield data


-def line_splitter(buffer, separator=u'\n'):
+def line_splitter(buffer, separator='\n'):
     index = buffer.find(str(separator))
     if index == -1:
         return None

@@ -45,7 +45,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
     of the input.
     """
     splitter = splitter or line_splitter
-    buffered = str('')
+    buffered = ''

     for data in stream_as_text(stream):
         buffered += data

@@ -116,7 +116,7 @@ def parse_nanoseconds_int(value):


 def build_string_dict(source_dict):
-    return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
+    return {k: str(v if v is not None else '') for k, v in source_dict.items()}


 def splitdrive(path):

@@ -1,5 +1,6 @@
 import logging
 import re
+from itertools import chain

 from docker.errors import NotFound
 from docker.utils import version_lt

@@ -15,7 +16,7 @@ from .const import LABEL_VOLUME
 log = logging.getLogger(__name__)


-class Volume(object):
+class Volume:
     def __init__(self, client, project, name, driver=None, driver_opts=None,
                  external=False, labels=None, custom_name=False):
         self.client = client

@@ -57,13 +58,13 @@ class Volume(object):
     def full_name(self):
         if self.custom_name:
             return self.name
-        return '{0}_{1}'.format(self.project.lstrip('-_'), self.name)
+        return '{}_{}'.format(self.project.lstrip('-_'), self.name)

     @property
     def legacy_full_name(self):
         if self.custom_name:
             return self.name
-        return '{0}_{1}'.format(
+        return '{}_{}'.format(
             re.sub(r'[_-]', '', self.project), self.name
         )

@@ -96,7 +97,7 @@ class Volume(object):
         self.legacy = False


-class ProjectVolumes(object):
+class ProjectVolumes:

     def __init__(self, volumes):
         self.volumes = volumes

@@ -132,7 +133,7 @@ class ProjectVolumes(object):
                 volume_exists = volume.exists()
                 if volume.external:
                     log.debug(
-                        'Volume {0} declared as external. No new '
+                        'Volume {} declared as external. No new '
                         'volume will be created.'.format(volume.name)
                     )
                     if not volume_exists:

@@ -148,7 +149,7 @@ class ProjectVolumes(object):

                 if not volume_exists:
                     log.info(
-                        'Creating volume "{0}" with {1} driver'.format(
+                        'Creating volume "{}" with {} driver'.format(
                             volume.full_name, volume.driver or 'default'
                         )
                     )

@@ -157,7 +158,7 @@ class ProjectVolumes(object):
                 check_remote_volume_config(volume.inspect(legacy=volume.legacy), volume)
             except NotFound:
                 raise ConfigurationError(
-                    'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver)
+                    'Volume {} specifies nonexistent driver {}'.format(volume.name, volume.driver)
                 )

     def namespace_spec(self, volume_spec):

@@ -174,7 +175,7 @@ class ProjectVolumes(object):

 class VolumeConfigChangedError(ConfigurationError):
     def __init__(self, local, property_name, local_value, remote_value):
-        super(VolumeConfigChangedError, self).__init__(
+        super().__init__(
             'Configuration for volume {vol_name} specifies {property_name} '
             '{local_value}, but a volume with the same name uses a different '
             '{property_name} ({remote_value}). If you wish to use the new '

@@ -192,7 +193,7 @@ def check_remote_volume_config(remote, local):
         raise VolumeConfigChangedError(local, 'driver', local.driver, remote.get('Driver'))
     local_opts = local.driver_opts or {}
     remote_opts = remote.get('Options') or {}
-    for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
+    for k in set(chain(remote_opts, local_opts)):
         if k.startswith('com.docker.'):  # These options are set internally
             continue
         if remote_opts.get(k) != local_opts.get(k):

@@ -202,7 +203,7 @@ def check_remote_volume_config(remote, local):

     local_labels = local.labels or {}
     remote_labels = remote.get('Labels') or {}
-    for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
+    for k in set(chain(remote_labels, local_labels)):
         if k.startswith('com.docker.'):  # We are only interested in user-specified labels
             continue
         if remote_labels.get(k) != local_labels.get(k):

@@ -156,7 +156,7 @@ def main(args):

     opts = parse_opts(args)

-    with open(opts.filename, 'r') as fh:
+    with open(opts.filename) as fh:
         new_format = migrate(fh.read())

     if opts.in_place:

@@ -1,4 +1,4 @@
-# -*- mode: python ; coding: utf-8 -*-
+# -*- mode: python -*-

 block_cipher = None

@@ -8,8 +8,6 @@ docker==4.3.0
 docker-pycreds==0.4.0
 dockerpty==0.4.1
 docopt==0.6.2
-enum34==1.1.6; python_version < '3.4'
-functools32==3.2.3.post2; python_version < '3.2'
 idna==2.10
 ipaddress==1.0.23
 jsonschema==3.2.0

@@ -6,7 +6,7 @@ from const import REPO_ROOT

 def update_init_py_version(version):
     path = os.path.join(REPO_ROOT, 'compose', '__init__.py')
-    with open(path, 'r') as f:
+    with open(path) as f:
         contents = f.read()
     contents = re.sub(r"__version__ = '[0-9a-z.-]+'", "__version__ = '{}'".format(version), contents)
     with open(path, 'w') as f:

@@ -15,7 +15,7 @@ def update_init_py_version(version):

 def update_run_sh_version(version):
     path = os.path.join(REPO_ROOT, 'script', 'run', 'run.sh')
-    with open(path, 'r') as f:
+    with open(path) as f:
         contents = f.read()
     contents = re.sub(r'VERSION="[0-9a-z.-]+"', 'VERSION="{}"'.format(version), contents)
     with open(path, 'w') as f:
setup.py (7 changes)
@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
import codecs
|
||||
import os
|
||||
import re
|
||||
@ -50,11 +49,7 @@ if sys.version_info[:2] < (3, 4):
|
||||
tests_require.append('mock >= 1.0.1, < 4')
|
||||
|
||||
extras_require = {
|
||||
':python_version < "3.2"': ['subprocess32 >= 3.5.4, < 4'],
|
||||
':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
|
||||
':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5, < 4'],
|
||||
':python_version < "3.3"': ['backports.shutil_get_terminal_size == 1.0.0',
|
||||
'ipaddress >= 1.0.16, < 2'],
|
||||
':sys_platform == "win32"': ['colorama >= 0.4, < 1'],
|
||||
'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
|
||||
'tests': tests_require,
|
||||
@ -94,7 +89,7 @@ setup(
|
||||
install_requires=install_requires,
|
||||
extras_require=extras_require,
|
||||
tests_require=tests_require,
|
||||
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
|
||||
python_requires='>=3.4',
|
||||
entry_points={
|
||||
'console_scripts': ['docker-compose=compose.cli.main:main'],
|
||||
},
|
||||
|
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 import datetime
 import json
 import os.path
@@ -99,7 +98,7 @@ def kill_service(service):
         container.kill()


-class ContainerCountCondition(object):
+class ContainerCountCondition:

     def __init__(self, project, expected):
         self.project = project
@@ -112,7 +111,7 @@ class ContainerCountCondition(object):
         return "waiting for counter count == %s" % self.expected


-class ContainerStateCondition(object):
+class ContainerStateCondition:

     def __init__(self, client, name, status):
         self.client = client
@@ -140,7 +139,7 @@ class ContainerStateCondition(object):
 class CLITestCase(DockerClientTestCase):

     def setUp(self):
-        super(CLITestCase, self).setUp()
+        super().setUp()
         self.base_dir = 'tests/fixtures/simple-composefile'
         self.override_dir = None

@@ -162,7 +161,7 @@ class CLITestCase(DockerClientTestCase):
         if hasattr(self, '_project'):
             del self._project

-        super(CLITestCase, self).tearDown()
+        super().tearDown()

     @property
     def project(self):
@@ -206,14 +205,14 @@ class CLITestCase(DockerClientTestCase):

     def test_shorthand_host_opt(self):
         self.dispatch(
-            ['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
+            ['-H={}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
             'up', '-d'],
             returncode=0
         )

     def test_shorthand_host_opt_interactive(self):
         self.dispatch(
-            ['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
+            ['-H={}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
             'run', 'another', 'ls'],
             returncode=0
         )
@@ -1453,7 +1452,7 @@ services:
             if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
         ]

-        assert set([v['Name'].split('/')[-1] for v in volumes]) == {volume_with_label}
+        assert {v['Name'].split('/')[-1] for v in volumes} == {volume_with_label}
         assert 'label_key' in volumes[0]['Labels']
         assert volumes[0]['Labels']['label_key'] == 'label_val'

@@ -1866,12 +1865,12 @@ services:
         self.dispatch(['run', 'implicit'])
         service = self.project.get_service('implicit')
         containers = service.containers(stopped=True, one_off=OneOffFilter.only)
-        assert [c.human_readable_command for c in containers] == [u'/bin/sh -c echo "success"']
+        assert [c.human_readable_command for c in containers] == ['/bin/sh -c echo "success"']

         self.dispatch(['run', 'explicit'])
         service = self.project.get_service('explicit')
         containers = service.containers(stopped=True, one_off=OneOffFilter.only)
-        assert [c.human_readable_command for c in containers] == [u'/bin/true']
+        assert [c.human_readable_command for c in containers] == ['/bin/true']

     @pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/<id> bug')
     def test_run_rm(self):
@@ -2701,7 +2700,7 @@ services:
         str_iso_date, str_iso_time, container_info = string.split(' ', 2)
         try:
             return isinstance(datetime.datetime.strptime(
-                '%s %s' % (str_iso_date, str_iso_time),
+                '{} {}'.format(str_iso_date, str_iso_time),
                 '%Y-%m-%d %H:%M:%S.%f'),
                 datetime.datetime)
         except ValueError:
@@ -2790,7 +2789,7 @@ services:
         self.base_dir = 'tests/fixtures/extends'
         self.dispatch(['up', '-d'], None)

-        assert set([s.name for s in self.project.services]) == {'mydb', 'myweb'}
+        assert {s.name for s in self.project.services} == {'mydb', 'myweb'}

         # Sort by name so we get [db, web]
         containers = sorted(

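The recurring rewrites in this file are pyupgrade's set-literal and unicode-literal transformations: a list comprehension wrapped in set() becomes a set comprehension, and u'' prefixes go away because every str is unicode in Python 3. A standalone sketch (names invented):

    names = ['db_1', 'web_1', 'web_2']

    # Python 2 style: build a throwaway list, then wrap it in set()
    old_style = set([n.split('_')[0] for n in names])
    # Python 3 style: the comprehension builds the set directly
    new_style = {n.split('_')[0] for n in names}

    assert old_style == new_style == {'db', 'web'}
    assert u'db' == 'db'  # the u prefix is a no-op on Python 3
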
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 import os
 import shutil
 import unittest
@@ -49,7 +49,7 @@ def create_custom_host_file(client, filename, content):


 def create_host_file(client, filename):
-    with open(filename, 'r') as fh:
+    with open(filename) as fh:
         content = fh.read()

     return create_custom_host_file(client, filename, content)

@@ -15,7 +15,7 @@ from tests.integration.testcases import DockerClientTestCase
 class EnvironmentTest(DockerClientTestCase):
     @classmethod
     def setUpClass(cls):
-        super(EnvironmentTest, cls).setUpClass()
+        super().setUpClass()
         cls.compose_file = tempfile.NamedTemporaryFile(mode='w+b')
         cls.compose_file.write(bytes("""version: '3.2'
 services:
@@ -27,7 +27,7 @@ services:

     @classmethod
     def tearDownClass(cls):
-        super(EnvironmentTest, cls).tearDownClass()
+        super().tearDownClass()
         cls.compose_file.close()

     @data('events',

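The setUpClass/tearDownClass hunks switch to the zero-argument super(), which Python 3 resolves from the enclosing class without repeating its name. A minimal sketch (classes invented):

    class Base:
        @classmethod
        def setUpClass(cls):
            print('setting up', cls.__name__)

    class Derived(Base):
        @classmethod
        def setUpClass(cls):
            # same as super(Derived, cls).setUpClass(), but survives
            # renaming the class and copy-paste between classes
            super().setUpClass()

    Derived.setUpClass()  # prints: setting up Derived
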
@@ -289,19 +289,19 @@ class ProjectTest(DockerClientTestCase):
         db_container = db.create_container()

         project.start(service_names=['web'])
-        assert set(c.name for c in project.containers() if c.is_running) == {
+        assert {c.name for c in project.containers() if c.is_running} == {
             web_container_1.name, web_container_2.name}

         project.start()
-        assert set(c.name for c in project.containers() if c.is_running) == {
+        assert {c.name for c in project.containers() if c.is_running} == {
             web_container_1.name, web_container_2.name, db_container.name}

         project.pause(service_names=['web'])
-        assert set([c.name for c in project.containers() if c.is_paused]) == {
+        assert {c.name for c in project.containers() if c.is_paused} == {
             web_container_1.name, web_container_2.name}

         project.pause()
-        assert set([c.name for c in project.containers() if c.is_paused]) == {
+        assert {c.name for c in project.containers() if c.is_paused} == {
             web_container_1.name, web_container_2.name, db_container.name}

         project.unpause(service_names=['db'])
@@ -311,7 +311,7 @@ class ProjectTest(DockerClientTestCase):
         assert len([c.name for c in project.containers() if c.is_paused]) == 0

         project.stop(service_names=['web'], timeout=1)
-        assert set(c.name for c in project.containers() if c.is_running) == {db_container.name}
+        assert {c.name for c in project.containers() if c.is_running} == {db_container.name}

         project.kill(service_names=['db'])
         assert len([c for c in project.containers() if c.is_running]) == 0
@@ -1177,8 +1177,8 @@ class ProjectTest(DockerClientTestCase):
         assert networks[0]['Labels']['label_key'] == 'label_val'

     def test_project_up_volumes(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = '{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)
         config_data = build_config(
             services=[{
                 'name': 'web',
@@ -1232,9 +1232,9 @@ class ProjectTest(DockerClientTestCase):
             if v['Name'].split('/')[-1].startswith('composetest_')
         ]

-        assert set([v['Name'].split('/')[-1] for v in volumes]) == set(
-            ['composetest_{}'.format(volume_name)]
-        )
+        assert {v['Name'].split('/')[-1] for v in volumes} == {
+            'composetest_{}'.format(volume_name)
+        }

         assert 'label_key' in volumes[0]['Labels']
         assert volumes[0]['Labels']['label_key'] == 'label_val'
@@ -1348,8 +1348,8 @@ class ProjectTest(DockerClientTestCase):
         assert len(project.containers()) == 3

     def test_initialize_volumes(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = '{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)
         config_data = build_config(
             services=[{
                 'name': 'web',
@@ -1370,8 +1370,8 @@ class ProjectTest(DockerClientTestCase):
         assert volume_data['Driver'] == 'local'

     def test_project_up_implicit_volume_driver(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = '{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)
         config_data = build_config(
             services=[{
                 'name': 'web',
@@ -1479,7 +1479,7 @@ class ProjectTest(DockerClientTestCase):
         assert output == b"This is the secret\n"

     def test_initialize_volumes_invalid_volume_driver(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
+        vol_name = '{:x}'.format(random.getrandbits(32))

         config_data = build_config(
             version=VERSION,
@@ -1500,8 +1500,8 @@ class ProjectTest(DockerClientTestCase):

     @no_cluster('inspect volume by name defect on Swarm Classic')
     def test_initialize_volumes_updated_driver(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = '{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)

         config_data = build_config(
             services=[{
@@ -1531,14 +1531,14 @@ class ProjectTest(DockerClientTestCase):
         )
         with pytest.raises(config.ConfigurationError) as e:
             project.volumes.initialize()
-        assert 'Configuration for volume {0} specifies driver smb'.format(
+        assert 'Configuration for volume {} specifies driver smb'.format(
             vol_name
         ) in str(e.value)

     @no_cluster('inspect volume by name defect on Swarm Classic')
     def test_initialize_volumes_updated_driver_opts(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = '{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)
         tmpdir = tempfile.mkdtemp(prefix='compose_test_')
         self.addCleanup(shutil.rmtree, tmpdir)
         driver_opts = {'o': 'bind', 'device': tmpdir, 'type': 'none'}
@@ -1575,13 +1575,13 @@ class ProjectTest(DockerClientTestCase):
         )
         with pytest.raises(config.ConfigurationError) as e:
             project.volumes.initialize()
-        assert 'Configuration for volume {0} specifies "device" driver_opt {1}'.format(
+        assert 'Configuration for volume {} specifies "device" driver_opt {}'.format(
             vol_name, driver_opts['device']
         ) in str(e.value)

     def test_initialize_volumes_updated_blank_driver(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = '{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)

         config_data = build_config(
             services=[{
@@ -1617,8 +1617,8 @@ class ProjectTest(DockerClientTestCase):
     @no_cluster('inspect volume by name defect on Swarm Classic')
     def test_initialize_volumes_external_volumes(self):
         # Use composetest_ prefix so it gets garbage-collected in tearDown()
-        vol_name = 'composetest_{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = 'composetest_{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)
         self.client.create_volume(vol_name)
         config_data = build_config(
             services=[{
@@ -1640,7 +1640,7 @@ class ProjectTest(DockerClientTestCase):
         self.client.inspect_volume(full_vol_name)

     def test_initialize_volumes_inexistent_external_volume(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
+        vol_name = '{:x}'.format(random.getrandbits(32))

         config_data = build_config(
             services=[{
@@ -1658,13 +1658,13 @@ class ProjectTest(DockerClientTestCase):
         )
         with pytest.raises(config.ConfigurationError) as e:
             project.volumes.initialize()
-        assert 'Volume {0} declared as external'.format(
+        assert 'Volume {} declared as external'.format(
             vol_name
         ) in str(e.value)

     def test_project_up_named_volumes_in_binds(self):
-        vol_name = '{0:x}'.format(random.getrandbits(32))
-        full_vol_name = 'composetest_{0}'.format(vol_name)
+        vol_name = '{:x}'.format(random.getrandbits(32))
+        full_vol_name = 'composetest_{}'.format(vol_name)

         base_file = config.ConfigFile(
             'base.yml',
@@ -1673,7 +1673,7 @@ class ProjectTest(DockerClientTestCase):
                 'simple': {
                     'image': BUSYBOX_IMAGE_WITH_TAG,
                     'command': 'top',
-                    'volumes': ['{0}:/data'.format(vol_name)]
+                    'volumes': ['{}:/data'.format(vol_name)]
                 },
             },
             'volumes': {

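Most hunks in this file drop explicit field indices from str.format(): since Python 2.7/3.1, empty braces auto-number left to right, so '{0}' and '{}' are interchangeable when fields are consumed in order. A sketch:

    import random

    bits = random.getrandbits(32)
    assert '{0:x}'.format(bits) == '{:x}'.format(bits)
    assert 'composetest_{0}'.format('ab12') == 'composetest_{}'.format('ab12')
    # explicit indices still matter only when fields repeat or reorder:
    assert '{1}-{0}'.format('a', 'b') == 'b-a'
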
@@ -22,7 +22,7 @@ class ResilienceTest(DockerClientTestCase):
     def tearDown(self):
         del self.project
         del self.db
-        super(ResilienceTest, self).tearDown()
+        super().tearDown()

     def test_successful_recreate(self):
         self.project.up(strategy=ConvergenceStrategy.always)

@@ -248,7 +248,7 @@ class ServiceTest(DockerClientTestCase):
         service = self.create_service('db', security_opt=security_opt)
         container = service.create_container()
         service.start_container(container)
-        assert set(container.get('HostConfig.SecurityOpt')) == set([o.repr() for o in security_opt])
+        assert set(container.get('HostConfig.SecurityOpt')) == {o.repr() for o in security_opt}

     @pytest.mark.xfail(True, reason='Not supported on most drivers')
     def test_create_container_with_storage_opt(self):
@@ -290,7 +290,7 @@ class ServiceTest(DockerClientTestCase):
         actual_host_path = container.get_mount(container_path)['Source']

         assert path.basename(actual_host_path) == path.basename(host_path), (
-            "Last component differs: %s, %s" % (actual_host_path, host_path)
+            "Last component differs: {}, {}".format(actual_host_path, host_path)
         )

     def test_create_container_with_host_mount(self):
@@ -844,11 +844,11 @@ class ServiceTest(DockerClientTestCase):
         db2 = create_and_start_container(db)
         create_and_start_container(web)

-        assert set(get_links(web.containers()[0])) == set([
+        assert set(get_links(web.containers()[0])) == {
             db1.name, db1.name_without_project,
             db2.name, db2.name_without_project,
             'db'
-        ])
+        }

     @no_cluster('No legacy links support in Swarm')
     def test_start_container_creates_links_with_names(self):
@@ -859,11 +859,11 @@ class ServiceTest(DockerClientTestCase):
         db2 = create_and_start_container(db)
         create_and_start_container(web)

-        assert set(get_links(web.containers()[0])) == set([
+        assert set(get_links(web.containers()[0])) == {
             db1.name, db1.name_without_project,
             db2.name, db2.name_without_project,
             'custom_link_name'
-        ])
+        }

     @no_cluster('No legacy links support in Swarm')
     def test_start_container_with_external_links(self):
@@ -879,11 +879,11 @@ class ServiceTest(DockerClientTestCase):

         create_and_start_container(web)

-        assert set(get_links(web.containers()[0])) == set([
+        assert set(get_links(web.containers()[0])) == {
             db_ctnrs[0].name,
             db_ctnrs[1].name,
             'db_3'
-        ])
+        }

     @no_cluster('No legacy links support in Swarm')
     def test_start_normal_container_does_not_create_links_to_its_own_service(self):
@@ -893,7 +893,7 @@ class ServiceTest(DockerClientTestCase):
         create_and_start_container(db)

         c = create_and_start_container(db)
-        assert set(get_links(c)) == set([])
+        assert set(get_links(c)) == set()

     @no_cluster('No legacy links support in Swarm')
     def test_start_one_off_container_creates_links_to_its_own_service(self):
@@ -904,11 +904,11 @@ class ServiceTest(DockerClientTestCase):

         c = create_and_start_container(db, one_off=OneOffFilter.only)

-        assert set(get_links(c)) == set([
+        assert set(get_links(c)) == {
             db1.name, db1.name_without_project,
             db2.name, db2.name_without_project,
             'db'
-        ])
+        }

     def test_start_container_builds_images(self):
         service = Service(
@@ -1719,14 +1719,14 @@ class ServiceTest(DockerClientTestCase):
         options = service._get_container_create_options({}, service._next_container_number())
         original = Container.create(service.client, **options)

-        assert set(service.containers(stopped=True)) == set([original])
+        assert set(service.containers(stopped=True)) == {original}
         assert set(service.duplicate_containers()) == set()

         options['name'] = 'temporary_container_name'
         duplicate = Container.create(service.client, **options)

-        assert set(service.containers(stopped=True)) == set([original, duplicate])
-        assert set(service.duplicate_containers()) == set([duplicate])
+        assert set(service.containers(stopped=True)) == {original, duplicate}
+        assert set(service.duplicate_containers()) == {duplicate}


 def converge(service, strategy=ConvergenceStrategy.changed):

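Here set([...]) calls collapse to set literals, with one corner case: the empty set must stay set(), because {} is an empty dict. A sketch:

    original, duplicate = 'c1', 'c2'

    assert {original} == set([original])
    assert {original, duplicate} == set([original, duplicate])
    assert set() == set([])   # {} would be an empty dict, not a set
    assert type({}) is dict
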
@@ -39,7 +39,7 @@ class ProjectTestCase(DockerClientTestCase):

 class BasicProjectTest(ProjectTestCase):
     def setUp(self):
-        super(BasicProjectTest, self).setUp()
+        super().setUp()

         self.cfg = {
             'db': {'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top'},
@@ -95,7 +95,7 @@ class BasicProjectTest(ProjectTestCase):

 class ProjectWithDependenciesTest(ProjectTestCase):
     def setUp(self):
-        super(ProjectWithDependenciesTest, self).setUp()
+        super().setUp()

         self.cfg = {
             'db': {
@@ -116,7 +116,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):

     def test_up(self):
         containers = self.run_up(self.cfg)
-        assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+        assert {c.service for c in containers} == {'db', 'web', 'nginx'}

     def test_change_leaf(self):
         old_containers = self.run_up(self.cfg)
@@ -124,7 +124,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)

-        assert set(c.service for c in new_containers - old_containers) == set(['nginx'])
+        assert {c.service for c in new_containers - old_containers} == {'nginx'}

     def test_change_middle(self):
         old_containers = self.run_up(self.cfg)
@@ -132,7 +132,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)

-        assert set(c.service for c in new_containers - old_containers) == set(['web'])
+        assert {c.service for c in new_containers - old_containers} == {'web'}

     def test_change_middle_always_recreate_deps(self):
         old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@@ -140,7 +140,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg, always_recreate_deps=True)

-        assert set(c.service for c in new_containers - old_containers) == {'web', 'nginx'}
+        assert {c.service for c in new_containers - old_containers} == {'web', 'nginx'}

     def test_change_root(self):
         old_containers = self.run_up(self.cfg)
@@ -148,7 +148,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)

-        assert set(c.service for c in new_containers - old_containers) == set(['db'])
+        assert {c.service for c in new_containers - old_containers} == {'db'}

     def test_change_root_always_recreate_deps(self):
         old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@@ -156,7 +156,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg, always_recreate_deps=True)

-        assert set(c.service for c in new_containers - old_containers) == {
+        assert {c.service for c in new_containers - old_containers} == {
             'db', 'web', 'nginx'
         }
@@ -213,7 +213,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):

 class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
     def setUp(self):
-        super(ProjectWithDependsOnDependenciesTest, self).setUp()
+        super().setUp()

         self.cfg = {
             'version': '2',
@@ -238,7 +238,7 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
     def test_up(self):
         local_cfg = copy.deepcopy(self.cfg)
         containers = self.run_up(local_cfg)
-        assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+        assert {c.service for c in containers} == {'db', 'web', 'nginx'}

     def test_change_leaf(self):
         local_cfg = copy.deepcopy(self.cfg)
@@ -247,7 +247,7 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
         local_cfg['services']['nginx']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(local_cfg)

-        assert set(c.service for c in new_containers - old_containers) == set(['nginx'])
+        assert {c.service for c in new_containers - old_containers} == {'nginx'}

     def test_change_middle(self):
         local_cfg = copy.deepcopy(self.cfg)
@@ -256,7 +256,7 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
         local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(local_cfg)

-        assert set(c.service for c in new_containers - old_containers) == set(['web'])
+        assert {c.service for c in new_containers - old_containers} == {'web'}

     def test_change_middle_always_recreate_deps(self):
         local_cfg = copy.deepcopy(self.cfg)
@@ -265,7 +265,7 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
         local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(local_cfg, always_recreate_deps=True)

-        assert set(c.service for c in new_containers - old_containers) == set(['web', 'nginx'])
+        assert {c.service for c in new_containers - old_containers} == {'web', 'nginx'}

     def test_change_root(self):
         local_cfg = copy.deepcopy(self.cfg)
@@ -274,7 +274,7 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
         local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(local_cfg)

-        assert set(c.service for c in new_containers - old_containers) == set(['db'])
+        assert {c.service for c in new_containers - old_containers} == {'db'}

     def test_change_root_always_recreate_deps(self):
         local_cfg = copy.deepcopy(self.cfg)
@@ -283,7 +283,7 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
         local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(local_cfg, always_recreate_deps=True)

-        assert set(c.service for c in new_containers - old_containers) == set(['db', 'web', 'nginx'])
+        assert {c.service for c in new_containers - old_containers} == {'db', 'web', 'nginx'}

     def test_change_root_no_recreate(self):
         local_cfg = copy.deepcopy(self.cfg)
@@ -303,24 +303,24 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
         del next_cfg['services']['web']['depends_on']

         containers = self.run_up(local_cfg)
-        assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+        assert {c.service for c in containers} == {'db', 'web', 'nginx'}

         project = self.make_project(local_cfg)
         project.stop(timeout=1)

         next_containers = self.run_up(next_cfg)
-        assert set(c.service for c in next_containers) == set(['web', 'nginx'])
+        assert {c.service for c in next_containers} == {'web', 'nginx'}

     def test_service_removed_while_up(self):
         local_cfg = copy.deepcopy(self.cfg)
         containers = self.run_up(local_cfg)
-        assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+        assert {c.service for c in containers} == {'db', 'web', 'nginx'}

         del local_cfg['services']['db']
         del local_cfg['services']['web']['depends_on']

         containers = self.run_up(local_cfg)
-        assert set(c.service for c in containers) == set(['web', 'nginx'])
+        assert {c.service for c in containers} == {'web', 'nginx'}

     def test_dependency_removed(self):
         local_cfg = copy.deepcopy(self.cfg)
@@ -328,24 +328,24 @@ class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
         del next_cfg['services']['nginx']['depends_on']

         containers = self.run_up(local_cfg, service_names=['nginx'])
-        assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+        assert {c.service for c in containers} == {'db', 'web', 'nginx'}

         project = self.make_project(local_cfg)
         project.stop(timeout=1)

         next_containers = self.run_up(next_cfg, service_names=['nginx'])
-        assert set(c.service for c in next_containers if c.is_running) == set(['nginx'])
+        assert {c.service for c in next_containers if c.is_running} == {'nginx'}

     def test_dependency_added(self):
         local_cfg = copy.deepcopy(self.cfg)

         del local_cfg['services']['nginx']['depends_on']
         containers = self.run_up(local_cfg, service_names=['nginx'])
-        assert set(c.service for c in containers) == set(['nginx'])
+        assert {c.service for c in containers} == {'nginx'}

         local_cfg['services']['nginx']['depends_on'] = ['db']
         containers = self.run_up(local_cfg, service_names=['nginx'])
-        assert set(c.service for c in containers) == set(['nginx', 'db'])
+        assert {c.service for c in containers} == {'nginx', 'db'}


 class ServiceStateTest(DockerClientTestCase):

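These hunks turn generator expressions fed to set() into set comprehensions; the result is identical, the comprehension just skips the extra call. A sketch (data invented):

    containers = [('db', 1), ('web', 1), ('web', 2)]

    old = set(name for name, _ in containers)  # generator passed to set()
    new = {name for name, _ in containers}     # direct set comprehension
    assert old == new == {'db', 'web'}
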
@@ -18,7 +18,7 @@ class VolumeTest(DockerClientTestCase):
         except DockerException:
             pass
         del self.tmp_volumes
-        super(VolumeTest, self).tearDown()
+        super().tearDown()

     def create_volume(self, name, driver=None, opts=None, external=None, custom_name=False):
         if external:

@@ -1,4 +1,3 @@
-# ~*~ encoding: utf-8 ~*~
 import os

 import pytest
@@ -9,7 +8,7 @@ from compose.const import IS_WINDOWS_PLATFORM
 from tests import mock


-class TestGetConfigPathFromOptions(object):
+class TestGetConfigPathFromOptions:

     def test_path_from_options(self):
         paths = ['one.yml', 'two.yml']

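Dropping the explicit (object) base is safe because every Python 3 class is new-style. A sketch:

    class OldSpelling(object):
        pass

    class NewSpelling:
        pass

    # both spellings end up inheriting from object on Python 3
    assert OldSpelling.__mro__[-1] is object
    assert NewSpelling.__mro__[-1] is object
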
@@ -55,7 +55,7 @@ class DockerClientTestCase(unittest.TestCase):

     def test_user_agent(self):
         client = docker_client(os.environ)
-        expected = "docker-compose/{0} docker-py/{1} {2}/{3}".format(
+        expected = "docker-compose/{} docker-py/{} {}/{}".format(
             compose.__version__,
             docker.__version__,
             platform.system(),
@@ -151,9 +151,9 @@ class TLSConfigTestCase(unittest.TestCase):

     def test_tls_client_and_ca_quoted_paths(self):
         options = {
-            '--tlscacert': '"{0}"'.format(self.ca_cert),
-            '--tlscert': '"{0}"'.format(self.client_cert),
-            '--tlskey': '"{0}"'.format(self.key),
+            '--tlscacert': '"{}"'.format(self.ca_cert),
+            '--tlscert': '"{}"'.format(self.client_cert),
+            '--tlskey': '"{}"'.format(self.key),
             '--tlsverify': True
         }
         result = tls_config_from_options(options)
@@ -185,9 +185,9 @@ class TLSConfigTestCase(unittest.TestCase):
             'DOCKER_TLS_VERIFY': 'false'
         })
         options = {
-            '--tlscacert': '"{0}"'.format(self.ca_cert),
-            '--tlscert': '"{0}"'.format(self.client_cert),
-            '--tlskey': '"{0}"'.format(self.key),
+            '--tlscacert': '"{}"'.format(self.ca_cert),
+            '--tlscert': '"{}"'.format(self.client_cert),
+            '--tlskey': '"{}"'.format(self.key),
             '--tlsverify': True
         }
@@ -230,7 +230,7 @@ class TLSConfigTestCase(unittest.TestCase):
         assert result.cert == (self.client_cert, self.key)


-class TestGetTlsVersion(object):
+class TestGetTlsVersion:
     def test_get_tls_version_default(self):
         environment = {}
         assert get_tls_version(environment) is None

@@ -21,7 +21,7 @@ def patch_find_executable(side_effect):
         side_effect=side_effect)


-class TestHandleConnectionErrors(object):
+class TestHandleConnectionErrors:

     def test_generic_connection_error(self, mock_logging):
         with pytest.raises(errors.ConnectionError):
@@ -43,7 +43,7 @@ class TestHandleConnectionErrors(object):
     def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
         with pytest.raises(errors.ConnectionError):
             with handle_connection_errors(mock.Mock(api_version='1.38')):
-                raise APIError(None, None, u"client is newer than server")
+                raise APIError(None, None, "client is newer than server")

         _, args, _ = mock_logging.error.mock_calls[0]
         assert "Docker Engine of version 18.06.0 or greater" in args[0]
@@ -57,7 +57,7 @@ class TestHandleConnectionErrors(object):
         mock_logging.error.assert_called_once_with(msg.decode('utf-8'))

     def test_api_error_version_other_unicode_explanation(self, mock_logging):
-        msg = u"Something broke!"
+        msg = "Something broke!"
         with pytest.raises(errors.ConnectionError):
             with handle_connection_errors(mock.Mock(api_version='1.22')):
                 raise APIError(None, None, msg)

@@ -40,10 +40,10 @@ class ConsoleWarningFormatterTestCase(unittest.TestCase):
         message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
         output = self.formatter.format(make_log_record(logging.WARN, message))
         expected = colors.yellow('WARNING') + ': '
-        assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
+        assert output == '{}{}'.format(expected, message.decode('utf-8'))

     def test_format_unicode_error(self):
         message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
         output = self.formatter.format(make_log_record(logging.ERROR, message))
         expected = colors.red('ERROR') + ': '
-        assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
+        assert output == '{}{}'.format(expected, message.decode('utf-8'))

@@ -29,7 +29,7 @@ def mock_container():
     return mock.Mock(spec=Container, name_without_project='web_1')


-class TestLogPresenter(object):
+class TestLogPresenter:

     def test_monochrome(self, mock_container):
         presenters = build_log_presenters(['foo', 'bar'], True)
@@ -83,7 +83,7 @@ def test_build_no_log_generator(mock_container):
     assert "exited with code" not in output


-class TestBuildLogGenerator(object):
+class TestBuildLogGenerator:

     def test_no_log_stream(self, mock_container):
         mock_container.log_stream = None
@@ -108,7 +108,7 @@ class TestBuildLogGenerator(object):
         assert next(generator) == "world"

     def test_unicode(self, output_stream):
-        glyph = u'\u2022\n'
+        glyph = '\u2022\n'
         mock_container.log_stream = iter([glyph.encode('utf-8')])

         generator = build_log_generator(mock_container, {})
@@ -125,7 +125,7 @@ def mock_presenters():
     return itertools.cycle([mock.Mock()])


-class TestWatchEvents(object):
+class TestWatchEvents:

     def test_stop_event(self, thread_map, mock_presenters):
         event_stream = [{'action': 'stop', 'id': 'cid'}]
@@ -167,7 +167,7 @@ class TestWatchEvents(object):
         assert container_id not in thread_map


-class TestConsumeQueue(object):
+class TestConsumeQueue:

     def test_item_is_an_exception(self):

@@ -22,7 +22,7 @@ def mock_container(service, number):
         container.Container,
         service=service,
         number=number,
-        name_without_project='{0}_{1}'.format(service, number))
+        name_without_project='{}_{}'.format(service, number))


 @pytest.fixture
@@ -32,7 +32,7 @@ def logging_handler():
     return logging.StreamHandler(stream=stream)


-class TestCLIMainTestCase(object):
+class TestCLIMainTestCase:

     def test_filter_attached_containers(self):
         containers = [
@@ -135,7 +135,7 @@ class TestCLIMainTestCase(object):
         assert expected_docker_start_call == docker_start_call


-class TestSetupConsoleHandlerTestCase(object):
+class TestSetupConsoleHandlerTestCase:

     def test_with_tty_verbose(self, logging_handler):
         setup_console_handler(logging_handler, True)
@@ -155,7 +155,7 @@ class TestSetupConsoleHandlerTestCase(object):
         assert type(logging_handler.formatter) == logging.Formatter


-class TestConvergeStrategyFromOptsTestCase(object):
+class TestConvergeStrategyFromOptsTestCase:

     def test_invalid_opts(self):
         options = {'--force-recreate': True, '--no-recreate': True}
@@ -189,7 +189,7 @@ def mock_find_executable(exe):


 @mock.patch('compose.cli.main.find_executable', mock_find_executable)
-class TestCallDocker(object):
+class TestCallDocker:
     def test_simple_no_options(self):
         with mock.patch('subprocess.call') as fake_call:
             call_docker(['ps'], {}, {})

@@ -1,4 +1,3 @@
-# encoding: utf-8
 import os
 import shutil
 import tempfile

@@ -1,4 +1,3 @@
-# encoding: utf-8
 import codecs
 import os
 import shutil
@@ -3885,12 +3884,12 @@ class VolumeConfigTest(unittest.TestCase):
         assert d['volumes'] == ['~:/data']

     def test_volume_path_with_non_ascii_directory(self):
-        volume = u'/Füü/data:/data'
+        volume = '/Füü/data:/data'
         container_path = config.resolve_volume_path(".", volume)
         assert container_path == volume


-class MergePathMappingTest(object):
+class MergePathMappingTest:
     config_name = ""

     def test_empty(self):
@@ -3963,7 +3962,7 @@ class BuildOrImageMergeTest(unittest.TestCase):
         assert config.merge_service_dicts({'image': 'redis'}, {'build': '.'}, V1) == {'build': '.'}


-class MergeListsTest(object):
+class MergeListsTest:
     config_name = ""
     base_config = []
     override_config = []
@@ -4396,7 +4395,7 @@ class EnvTest(unittest.TestCase):
             {'env_file': ['tests/fixtures/env/resolve.env']},
             Environment.from_env_file(None)
         ) == {
-            'FILE_DEF': u'bär',
+            'FILE_DEF': 'bär',
             'FILE_DEF_EMPTY': '',
             'ENV_DEF': 'E3',
             'NO_DEF': None
@@ -5042,14 +5041,14 @@ class VolumePathTest(unittest.TestCase):
         container_path = 'c:\\scarletdevil\\data'
         expected_mapping = (container_path, (host_path, None))

-        mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
+        mapping = config.split_path_mapping('{}:{}'.format(host_path, container_path))
         assert mapping == expected_mapping

     def test_split_path_mapping_with_root_mount(self):
         host_path = '/'
         container_path = '/var/hostroot'
         expected_mapping = (container_path, (host_path, None))
-        mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
+        mapping = config.split_path_mapping('{}:{}'.format(host_path, container_path))
         assert mapping == expected_mapping

@@ -1,4 +1,3 @@
-# encoding: utf-8
 import codecs
 import os
 import shutil

@@ -1,4 +1,3 @@
-# encoding: utf-8
 import pytest

 from compose.config.environment import Environment
@@ -439,7 +438,7 @@ def test_unbraced_separators(defaults_interpolator):

 def test_interpolate_unicode_values():
     variable_mapping = {
-        'FOO': '十六夜 咲夜'.encode('utf-8'),
+        'FOO': '十六夜 咲夜'.encode(),
         'BAR': '十六夜 咲夜'
     }
     interpol = Interpolator(TemplateWithDefaults, variable_mapping).interpolate

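str.encode() defaults to UTF-8 on Python 3, so spelling out the codec is redundant. A sketch:

    text = '十六夜 咲夜'
    assert text.encode() == text.encode('utf-8')
    assert text.encode().decode() == text  # decode() defaults to UTF-8 too
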
@@ -5,7 +5,7 @@ from compose.config.sort_services import sort_service_dicts
 from compose.config.types import VolumeFromSpec


-class TestSortService(object):
+class TestSortService:
    def test_sort_service_dicts_1(self):
        services = [
            {

@@ -39,7 +39,7 @@ def test_parse_extra_hosts_dict():
     }


-class TestServicePort(object):
+class TestServicePort:
     def test_parse_dict(self):
         data = {
             'target': 8000,
@@ -129,7 +129,7 @@ class TestServicePort(object):
             ServicePort.parse(port_def)


-class TestVolumeSpec(object):
+class TestVolumeSpec:

     def test_parse_volume_spec_only_one_path(self):
         spec = VolumeSpec.parse('/the/volume')
@@ -216,7 +216,7 @@ class TestVolumeSpec(object):
         )


-class TestVolumesFromSpec(object):
+class TestVolumesFromSpec:

     services = ['servicea', 'serviceb']

@@ -1,5 +1,3 @@
-# ~*~ encoding: utf-8 ~*~
-import io
 import os
 import random
 import shutil
@@ -75,7 +73,7 @@ class ProgressStreamTestCase(unittest.TestCase):

     def mktempfile(encoding):
         fname = os.path.join(tmpdir, hex(random.getrandbits(128))[2:-1])
-        return io.open(fname, mode='w+', encoding=encoding)
+        return open(fname, mode='w+', encoding=encoding)

     text = '就吃饭'
     with mktempfile(encoding='utf-8') as tf:

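The io import can go because on Python 3 the built-in open and io.open are the same function. A sketch:

    import io

    # io.open was the Python 2 spelling for what became the Python 3 open()
    assert open is io.open
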
@@ -1,4 +1,3 @@
-# encoding: utf-8
 import datetime
 import os
 import tempfile
@@ -739,7 +738,7 @@ class ProjectTest(unittest.TestCase):
         assert fake_log.warn.call_count == 0

     def test_no_such_service_unicode(self):
-        assert NoSuchService('十六夜 咲夜'.encode('utf-8')).msg == 'No such service: 十六夜 咲夜'
+        assert NoSuchService('十六夜 咲夜'.encode()).msg == 'No such service: 十六夜 咲夜'
         assert NoSuchService('十六夜 咲夜').msg == 'No such service: 十六夜 咲夜'

     def test_project_platform_value(self):

@@ -63,9 +63,9 @@ class ServiceTest(unittest.TestCase):
         assert [c.id for c in service.containers()] == list(range(3))

         expected_labels = [
-            '{0}=myproject'.format(LABEL_PROJECT),
-            '{0}=db'.format(LABEL_SERVICE),
-            '{0}=False'.format(LABEL_ONE_OFF),
+            '{}=myproject'.format(LABEL_PROJECT),
+            '{}=db'.format(LABEL_SERVICE),
+            '{}=False'.format(LABEL_ONE_OFF),
         ]

         self.mock_client.containers.assert_called_once_with(

@@ -36,7 +36,7 @@ class SplitBufferTest(unittest.TestCase):
         self.assert_produces(reader, ['abc\n', 'd'])

     def test_preserves_unicode_sequences_within_lines(self):
-        string = u"a\u2022c\n"
+        string = "a\u2022c\n"

         def reader():
             yield string.encode('utf-8')

@@ -1,8 +1,7 @@
-# encoding: utf-8
 from compose import utils


-class TestJsonSplitter(object):
+class TestJsonSplitter:

     def test_json_splitter_no_object(self):
         data = '{"foo": "bar'
@@ -17,7 +16,7 @@ class TestJsonSplitter(object):
         assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')


-class TestStreamAsText(object):
+class TestStreamAsText:

     def test_stream_with_non_utf_unicode_character(self):
         stream = [b'\xed\xf3\xf3']
@@ -25,12 +24,12 @@ class TestStreamAsText(object):
         assert output == '���'

     def test_stream_with_utf_character(self):
-        stream = ['ěĝ'.encode('utf-8')]
+        stream = ['ěĝ'.encode()]
         output, = utils.stream_as_text(stream)
         assert output == 'ěĝ'


-class TestJsonStream(object):
+class TestJsonStream:

     def test_with_falsy_entries(self):
         stream = [
@@ -59,7 +58,7 @@ class TestJsonStream(object):
         ]


-class TestParseBytes(object):
+class TestParseBytes:
     def test_parse_bytes(self):
         assert utils.parse_bytes('123kb') == 123 * 1024
         assert utils.parse_bytes(123) == 123
@@ -67,7 +66,7 @@ class TestParseBytes(object):
         assert utils.parse_bytes('123') == 123


-class TestMoreItertools(object):
+class TestMoreItertools:
     def test_unique_everseen(self):
         unique = utils.unique_everseen
         assert list(unique([2, 1, 2, 1])) == [2, 1]
@@ -10,7 +10,7 @@ def mock_client():
     return mock.create_autospec(docker.APIClient)


-class TestVolume(object):
+class TestVolume:

     def test_remove_local_volume(self, mock_client):
         vol = volume.Volume(mock_client, 'foo', 'project')