Merge pull request #3595 from dnephin/add-push-and-bundle

Add docker-compose push and docker-compose bundle
Joffrey F 2016-06-14 10:57:32 -07:00 committed by GitHub
commit 0fe82614a6
7 changed files with 333 additions and 20 deletions

compose/bundle.py (new file)

@@ -0,0 +1,224 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import json
import logging

import six
from docker.utils import split_command
from docker.utils.ports import split_port

from .cli.errors import UserError
from .config.serialize import denormalize_config
from .network import get_network_defs_for_service
from .service import format_environment
from .service import NoSuchImageError
from .service import parse_repository_tag


log = logging.getLogger(__name__)


SERVICE_KEYS = {
    'working_dir': 'WorkingDir',
    'user': 'User',
    'labels': 'Labels',
}

IGNORED_KEYS = {'build'}

SUPPORTED_KEYS = {
    'image',
    'ports',
    'expose',
    'networks',
    'command',
    'environment',
    'entrypoint',
} | set(SERVICE_KEYS)

VERSION = '0.1'


def serialize_bundle(config, image_digests):
    if config.networks:
        log.warn("Unsupported top level key 'networks' - ignoring")

    if config.volumes:
        log.warn("Unsupported top level key 'volumes' - ignoring")

    return json.dumps(
        to_bundle(config, image_digests),
        indent=2,
        sort_keys=True,
    )


def get_image_digests(project):
    return {
        service.name: get_image_digest(service)
        for service in project.services
    }


def get_image_digest(service):
    if 'image' not in service.options:
        raise UserError(
            "Service '{s.name}' doesn't define an image tag. An image name is "
            "required to generate a proper image digest for the bundle. Specify "
            "an image repo and tag with the 'image' option.".format(s=service))

    repo, tag, separator = parse_repository_tag(service.options['image'])
    # Compose file already uses a digest, no lookup required
    if separator == '@':
        return service.options['image']

    try:
        image = service.image()
    except NoSuchImageError:
        action = 'build' if 'build' in service.options else 'pull'
        raise UserError(
            "Image not found for service '{service}'. "
            "You might need to run `docker-compose {action} {service}`."
            .format(service=service.name, action=action))

    if image['RepoDigests']:
        # TODO: pick a digest based on the image tag if there are multiple
        # digests
        return image['RepoDigests'][0]

    if 'build' not in service.options:
        log.warn(
            "Compose needs to pull the image for '{s.name}' in order to create "
            "a bundle. This may result in a more recent image being used. "
            "It is recommended that you use an image tagged with a "
            "specific version to minimize the potential "
            "differences.".format(s=service))
        digest = service.pull()
    else:
        try:
            digest = service.push()
        except:
            log.error(
                "Failed to push image for service '{s.name}'. Please use an "
                "image tag that can be pushed to a Docker "
                "registry.".format(s=service))
            raise

    if not digest:
        raise ValueError("Failed to get digest for %s" % service.name)

    identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)
    # Pull by digest so that image['RepoDigests'] is populated for next time
    # and we don't have to pull/push again
    service.client.pull(identifier)
    return identifier


def to_bundle(config, image_digests):
    config = denormalize_config(config)

    return {
        'version': VERSION,
        'services': {
            name: convert_service_to_bundle(
                name,
                service_dict,
                image_digests[name],
            )
            for name, service_dict in config['services'].items()
        },
    }


def convert_service_to_bundle(name, service_dict, image_digest):
    container_config = {'Image': image_digest}

    for key, value in service_dict.items():
        if key in IGNORED_KEYS:
            continue

        if key not in SUPPORTED_KEYS:
            log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
            continue

        if key == 'environment':
            container_config['Env'] = format_environment({
                envkey: envvalue for envkey, envvalue in value.items()
                if envvalue
            })
            continue

        if key in SERVICE_KEYS:
            container_config[SERVICE_KEYS[key]] = value
            continue

    set_command_and_args(
        container_config,
        service_dict.get('entrypoint', []),
        service_dict.get('command', []))

    container_config['Networks'] = make_service_networks(name, service_dict)

    ports = make_port_specs(service_dict)
    if ports:
        container_config['Ports'] = ports

    return container_config


# See https://github.com/docker/swarmkit/blob//agent/exec/container/container.go#L95
def set_command_and_args(config, entrypoint, command):
    if isinstance(entrypoint, six.string_types):
        entrypoint = split_command(entrypoint)
    if isinstance(command, six.string_types):
        command = split_command(command)

    if entrypoint:
        config['Command'] = entrypoint + command
        return

    if command:
        config['Args'] = command


def make_service_networks(name, service_dict):
    networks = []

    for network_name, network_def in get_network_defs_for_service(service_dict).items():
        for key in network_def.keys():
            log.warn(
                "Unsupported key '{}' in services.{}.networks.{} - ignoring"
                .format(key, name, network_name))

        networks.append(network_name)

    return networks


def make_port_specs(service_dict):
    ports = []

    internal_ports = [
        internal_port
        for port_def in service_dict.get('ports', [])
        for internal_port in split_port(port_def)[0]
    ]

    internal_ports += service_dict.get('expose', [])

    for internal_port in internal_ports:
        spec = make_port_spec(internal_port)
        if spec not in ports:
            ports.append(spec)

    return ports


def make_port_spec(value):
    components = six.text_type(value).partition('/')
    return {
        'Protocol': components[2] or 'tcp',
        'Port': int(components[0]),
    }
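For illustration, a sketch of the JSON shape serialize_bundle() produces. Only the key names and nesting follow the conversion rules above; the service name, digest, and values are invented placeholders, not output from a real project.

# Hypothetical bundle produced by serialize_bundle() for a single service.
example_bundle = {
    'version': '0.1',                                    # VERSION
    'services': {
        'web': {                                         # invented service name
            'Image': 'example/web@sha256:0123abcd',      # from get_image_digest()
            'Command': ['nginx', '-g', 'daemon off;'],   # set_command_and_args(): entrypoint + command
            'Env': ['LOG_LEVEL=info'],                   # format_environment() output
            'Networks': ['default'],                     # make_service_networks()
            'Ports': [{'Protocol': 'tcp', 'Port': 80}],  # make_port_specs()
        },
    },
}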

compose/cli/command.py

@@ -36,6 +36,16 @@ def project_from_options(project_dir, options):
)
def get_config_from_options(base_dir, options):
environment = Environment.from_env_file(base_dir)
config_path = get_config_path_from_options(
base_dir, options, environment
)
return config.load(
config.find(base_dir, config_path, environment)
)
def get_config_path_from_options(base_dir, options, environment):
file_option = options.get('--file')
if file_option:

compose/cli/main.py

@@ -14,10 +14,10 @@ from operator import attrgetter
from . import errors
from . import signals
from .. import __version__
from ..config import config
from ..bundle import get_image_digests
from ..bundle import serialize_bundle
from ..config import ConfigurationError
from ..config import parse_environment
from ..config.environment import Environment
from ..config.serialize import serialize_config
from ..const import DEFAULT_TIMEOUT
from ..const import IS_WINDOWS_PLATFORM
@@ -30,7 +30,7 @@ from ..service import BuildError
from ..service import ConvergenceStrategy
from ..service import ImageType
from ..service import NeedsBuildError
from .command import get_config_path_from_options
from .command import get_config_from_options
from .command import project_from_options
from .docopt_command import DocoptDispatcher
from .docopt_command import get_handler
@@ -98,7 +98,7 @@ def perform_command(options, handler, command_options):
handler(command_options)
return
if options['COMMAND'] == 'config':
if options['COMMAND'] in ('config', 'bundle'):
command = TopLevelCommand(None)
handler(command, options, command_options)
return
@@ -164,6 +164,7 @@ class TopLevelCommand(object):
Commands:
build Build or rebuild services
bundle Generate a Docker bundle from the Compose file
config Validate and view the compose file
create Create services
down Stop and remove containers, networks, images, and volumes
@@ -176,6 +177,7 @@
port Print the public port for a port binding
ps List containers
pull Pulls service images
push Push service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
@@ -212,6 +214,34 @@
pull=bool(options.get('--pull', False)),
force_rm=bool(options.get('--force-rm', False)))
def bundle(self, config_options, options):
"""
Generate a Docker bundle from the Compose file.
Local images will be pushed to a Docker registry, and remote images
will be pulled to fetch an image digest.
Usage: bundle [options]
Options:
-o, --output PATH Path to write the bundle file to.
Defaults to "<project name>.dsb".
"""
self.project = project_from_options('.', config_options)
compose_config = get_config_from_options(self.project_dir, config_options)
output = options["--output"]
if not output:
output = "{}.dsb".format(self.project.name)
with errors.handle_connection_errors(self.project.client):
image_digests = get_image_digests(self.project)
with open(output, 'w') as f:
f.write(serialize_bundle(compose_config, image_digests))
log.info("Wrote bundle to {}".format(output))
def config(self, config_options, options):
"""
Validate and view the compose file.
@@ -224,13 +254,7 @@
--services Print the service names, one per line.
"""
environment = Environment.from_env_file(self.project_dir)
config_path = get_config_path_from_options(
self.project_dir, config_options, environment
)
compose_config = config.load(
config.find(self.project_dir, config_path, environment)
)
compose_config = get_config_from_options(self.project_dir, config_options)
if options['--quiet']:
return
@@ -518,6 +542,20 @@
ignore_pull_failures=options.get('--ignore-pull-failures')
)
def push(self, options):
"""
Pushes images for services.
Usage: push [options] [SERVICE...]
Options:
--ignore-push-failures Push what it can and ignores images with push failures.
"""
self.project.push(
service_names=options['SERVICE'],
ignore_push_failures=options.get('--ignore-push-failures')
)
def rm(self, options):
"""
Removes stopped service containers.
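For illustration, a rough sketch of what the new bundle command does when driven as a library, using the helpers added in this change. The empty options dict and current-directory project are assumptions, and connection-error handling (errors.handle_connection_errors) is omitted.

# Approximate equivalent of `docker-compose bundle`, as a sketch only.
from compose.bundle import get_image_digests, serialize_bundle
from compose.cli.command import get_config_from_options, project_from_options

options = {}                                         # real values come from docopt
project = project_from_options('.', options)
compose_config = get_config_from_options('.', options)

image_digests = get_image_digests(project)           # may pull or push images
with open('{}.dsb'.format(project.name), 'w') as f:
    f.write(serialize_bundle(compose_config, image_digests))

The new `docker-compose push` is the simpler half of this flow: it calls Project.push(), which delegates to Service.push() for each selected service.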

compose/config/serialize.py

@@ -18,7 +18,7 @@ yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
def serialize_config(config):
def denormalize_config(config):
denormalized_services = [
denormalize_service_dict(service_dict, config.version)
for service_dict in config.services
@@ -32,15 +32,17 @@ def serialize_config(config):
if 'external_name' in net_conf:
del net_conf['external_name']
output = {
return {
'version': V2_0,
'services': services,
'networks': networks,
'volumes': config.volumes,
}
def serialize_config(config):
return yaml.safe_dump(
output,
denormalize_config(config),
default_flow_style=False,
indent=2,
width=80)
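For illustration, a sketch of how the split above is used: denormalize_config() returns the plain dict that compose/bundle.py serializes to JSON, while serialize_config() dumps the same dict as YAML for `docker-compose config`. The current-directory project and the None file list are assumptions.

from compose.config import config
from compose.config.environment import Environment
from compose.config.serialize import denormalize_config, serialize_config

environment = Environment.from_env_file('.')           # assumed: project in cwd
loaded = config.load(config.find('.', None, environment))

as_dict = denormalize_config(loaded)   # plain dict, reused by compose/bundle.py
as_yaml = serialize_config(loaded)     # YAML text, used by `docker-compose config`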

compose/progress_stream.py

@@ -91,3 +91,22 @@ def print_output_event(event, stream, is_terminal):
stream.write("%s%s" % (event['stream'], terminator))
else:
stream.write("%s%s\n" % (status, terminator))
def get_digest_from_pull(events):
for event in events:
status = event.get('status')
if not status or 'Digest' not in status:
continue
_, digest = status.split(':', 1)
return digest.strip()
return None
def get_digest_from_push(events):
for event in events:
digest = event.get('aux', {}).get('Digest')
if digest:
return digest
return None
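For illustration, a self-contained sketch with invented events showing what the two helpers extract from a docker-py event stream; real Docker API payloads may differ slightly.

from compose.progress_stream import get_digest_from_pull, get_digest_from_push

# Invented sample events, for illustration only.
pull_events = [
    {'status': 'Pulling from library/busybox'},
    {'status': 'Digest: sha256:abc123'},
]
push_events = [
    {'status': 'Pushing'},
    {'aux': {'Digest': 'sha256:abc123'}},
]

print(get_digest_from_pull(iter(pull_events)))   # sha256:abc123
print(get_digest_from_push(iter(push_events)))   # sha256:abc123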

compose/project.py

@@ -440,6 +440,10 @@
for service in self.get_services(service_names, include_deps=False):
service.pull(ignore_pull_failures)
def push(self, service_names=None, ignore_push_failures=False):
for service in self.get_services(service_names, include_deps=False):
service.push(ignore_push_failures)
def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
return list(filter(None, [
Container.from_ps(self.client, container)

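For illustration, a usage sketch of the new Project.push(); the service names are invented and `project` is assumed to come from project_from_options, as in the earlier sketch.

# Push only the listed services' images; with ignore_push_failures=True,
# stream errors are logged instead of raised (see Service.push below).
project.push(service_names=['web', 'worker'], ignore_push_failures=True)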
compose/service.py

@@ -15,6 +15,7 @@ from docker.utils.ports import build_port_bindings
from docker.utils.ports import split_port
from . import __version__
from . import progress_stream
from .config import DOCKER_CONFIG_KEYS
from .config import merge_environment
from .config.types import VolumeSpec
@@ -806,20 +807,35 @@ class Service(object):
repo, tag, separator = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
)
output = self.client.pull(repo, tag=tag, stream=True)
try:
stream_output(output, sys.stdout)
return progress_stream.get_digest_from_pull(
stream_output(output, sys.stdout))
except StreamOutputError as e:
if not ignore_pull_failures:
raise
else:
log.error(six.text_type(e))
def push(self, ignore_push_failures=False):
if 'image' not in self.options or 'build' not in self.options:
return
repo, tag, separator = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
output = self.client.push(repo, tag=tag, stream=True)
try:
return progress_stream.get_digest_from_push(
stream_output(output, sys.stdout))
except StreamOutputError as e:
if not ignore_push_failures:
raise
else:
log.error(six.text_type(e))
def short_id_alias_exists(container, network):
aliases = container.get(