Fix #5465 by catching a no-image exception in get_container_data_volumes

The ImageNotFound exception is now bubbled up to the client, which prompts
the user for the desired course of action (rebuild or abort). Added a case
to the unit test to check that a container with a missing image doesn't
result in an exception.

Closed the old pull request #5466 because a rebase left it showing over
300 commits to merge, which was messy.

Signed-off-by: Ian Glen Neal <ian.gl.neal@gmail.com>
Author: Ian Glen Neal, 2017-12-10 23:50:24 +00:00 (committed by Joffrey F)
Parent: 5d4ccafd35
Commit: 7591639c72
5 changed files with 106 additions and 37 deletions
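As a quick orientation before the diff: a minimal sketch of the new client-side flow, reusing the project.up keyword argument and prompt wording that appear in the hunks below. The up_with_rebuild_prompt wrapper and the inline yesno stand-in are illustrative, not part of the commit.

from docker.errors import ImageNotFound

def yesno(prompt, default=False):
    # Stand-in for the CLI's yesno prompt helper used in the diff.
    answer = input(prompt + ' ').strip().lower()
    return default if not answer else answer.startswith('y')

def up_with_rebuild_prompt(project, **up_kwargs):
    # First attempt: never rebuild silently, so existing volume data is kept.
    try:
        return project.up(rebuild=False, **up_kwargs)
    except ImageNotFound as e:
        print('Image not found. If you continue, there is a risk of data '
              'loss. Consider backing up your data before continuing.\n\n'
              'Full error message: {}\n'.format(e.explanation))
        if not yesno('Continue by rebuilding the image(s)? [yN]', False):
            raise
        # Second attempt: the user accepted the rebuild.
        return project.up(rebuild=True, **up_kwargs)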


@ -972,24 +972,50 @@ class TopLevelCommand(object):
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
if no_start:
for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
if options.get(excluded):
raise UserError('--no-start and {} cannot be combined.'.format(excluded))
opts = ['-d', '--abort-on-container-exit', '--exit-code-from']
for excluded in [x for x in opts if options.get(x)]:
raise UserError('--no-start and {} cannot be combined.'.format(excluded))
with up_shutdown_context(self.project, service_names, timeout, detached):
to_attach = self.project.up(
service_names=service_names,
start_deps=start_deps,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
remove_orphans=remove_orphans,
ignore_orphans=ignore_orphans,
scale_override=parse_scale_args(options['--scale']),
start=not no_start,
always_recreate_deps=always_recreate_deps
)
try:
to_attach = self.project.up(
service_names=service_names,
start_deps=start_deps,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
remove_orphans=remove_orphans,
ignore_orphans=ignore_orphans,
scale_override=parse_scale_args(options['--scale']),
start=not no_start,
always_recreate_deps=always_recreate_deps,
rebuild=False
)
except docker.errors.ImageNotFound as e:
log.error(("Image not found. If you continue, there is a "
"risk of data loss. Consider backing up your data "
"before continuing.\n\n"
"Full error message: {}\n"
).format(e.explanation))
res = yesno("Continue by rebuilding the image(s)? [yN]", False)
if res is None or not res:
raise e
to_attach = self.project.up(
service_names=service_names,
start_deps=start_deps,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
remove_orphans=remove_orphans,
ignore_orphans=ignore_orphans,
scale_override=parse_scale_args(options['--scale']),
start=not no_start,
always_recreate_deps=always_recreate_deps,
rebuild=True
)
if detached or no_start:
return


@ -8,6 +8,7 @@ from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from docker.errors import ImageNotFound
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue
@ -53,10 +54,7 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, pa
writer = ParallelStreamWriter(stream, msg)
if parent_objects:
display_objects = list(parent_objects)
else:
display_objects = objects
display_objects = list(parent_objects) if parent_objects else objects
for obj in display_objects:
writer.add_object(get_name(obj))
@ -76,6 +74,12 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, pa
if exception is None:
writer.write(get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
# can prompt the user if they want to rebuild.
errors[get_name(obj)] = exception.explanation
writer.write(get_name(obj), 'error', red)
error_to_reraise = exception
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(get_name(obj), 'error', red)

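A simplified, serial sketch of the bubbling behaviour added above; the names are illustrative and this is not the real parallel_execute signature. The point is only that an ImageNotFound from any worker is recorded like an APIError but also kept to re-raise afterwards, so the caller can offer a rebuild.

from docker.errors import APIError, ImageNotFound

def run_all(objects, func, get_name):
    results, errors, error_to_reraise = [], {}, None
    for obj in objects:
        try:
            results.append(func(obj))
        except ImageNotFound as e:
            # Record the error like any other, but keep it to re-raise so
            # the client can prompt the user to rebuild.
            errors[get_name(obj)] = e.explanation
            error_to_reraise = e
        except APIError as e:
            errors[get_name(obj)] = e.explanation
    if error_to_reraise is not None:
        raise error_to_reraise
    return results, errors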

@ -442,6 +442,7 @@ class Project(object):
remove_orphans=False,
ignore_orphans=False,
scale_override=None,
rebuild=False,
rescale=True,
start=True,
always_recreate_deps=False):
@ -472,6 +473,7 @@ class Project(object):
timeout=timeout,
detached=detached,
scale_override=scale_override.get(service.name),
rebuild=rebuild,
rescale=rescale,
start=start,
project_services=scaled_services


@ -280,6 +280,7 @@ class Service(object):
previous_container=None,
number=None,
quiet=False,
rebuild=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
@ -293,6 +294,7 @@ class Service(object):
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
rebuild=rebuild,
previous_container=previous_container,
)
@ -409,7 +411,7 @@ class Service(object):
return containers
def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
def _execute_convergence_recreate(self, containers, scale, timeout, detached, start, rebuild):
if scale is not None and len(containers) > scale:
self._downscale(containers[scale:], timeout)
containers = containers[:scale]
@ -417,7 +419,7 @@ class Service(object):
def recreate(container):
return self.recreate_container(
container, timeout=timeout, attach_logs=not detached,
start_new_container=start
start_new_container=start, rebuild=rebuild
)
containers, errors = parallel_execute(
containers,
@ -468,7 +470,8 @@ class Service(object):
)
def execute_convergence_plan(self, plan, timeout=None, detached=False,
start=True, scale_override=None, rescale=True, project_services=None):
start=True, scale_override=None, rebuild=False,
rescale=True, project_services=None):
(action, containers) = plan
scale = scale_override if scale_override is not None else self.scale_num
containers = sorted(containers, key=attrgetter('number'))
@ -487,7 +490,7 @@ class Service(object):
if action == 'recreate':
return self._execute_convergence_recreate(
containers, scale, timeout, detached, start
containers, scale, timeout, detached, start, rebuild
)
if action == 'start':
@ -512,6 +515,7 @@ class Service(object):
container,
timeout=None,
attach_logs=False,
rebuild=False,
start_new_container=True):
"""Recreate a container.
@ -526,6 +530,7 @@ class Service(object):
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
rebuild=rebuild
)
if attach_logs:
new_container.attach_log_stream()
@ -746,6 +751,7 @@ class Service(object):
override_options,
number,
one_off=False,
rebuild=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
@ -795,7 +801,7 @@ class Service(object):
override_options.get('labels'))
container_options, override_options = self._build_container_volume_options(
previous_container, container_options, override_options
previous_container, container_options, override_options, rebuild
)
container_options['image'] = self.image_name
@ -822,7 +828,8 @@ class Service(object):
container_options['environment'])
return container_options
def _build_container_volume_options(self, previous_container, container_options, override_options):
def _build_container_volume_options(self, previous_container, container_options,
override_options, rebuild):
container_volumes = []
container_mounts = []
if 'volumes' in container_options:
@ -833,7 +840,7 @@ class Service(object):
binds, affinity = merge_volume_bindings(
container_volumes, self.options.get('tmpfs') or [], previous_container,
container_mounts
container_mounts, rebuild
)
override_options['binds'] = binds
container_options['environment'].update(affinity)
@ -1281,7 +1288,7 @@ def parse_repository_tag(repo_path):
# Volumes
def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
def merge_volume_bindings(volumes, tmpfs, previous_container, mounts, rebuild):
"""
Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
@ -1297,7 +1304,7 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
if previous_container:
old_volumes, old_mounts = get_container_data_volumes(
previous_container, volumes, tmpfs, mounts
previous_container, volumes, tmpfs, mounts, rebuild
)
warn_on_masked_volume(volumes, old_volumes, previous_container.service)
volume_bindings.update(
@ -1310,12 +1317,34 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
return list(volume_bindings.values()), affinity
def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
def try_get_image_volumes(container, rebuild):
"""
Try to get the volumes from the existing container's image. If the image
does not exist, return no image volumes when a rebuild was requested,
otherwise re-raise so the client can prompt the user.
"""
try:
image_volumes = [
VolumeSpec.parse(volume)
for volume in
container.image_config['ContainerConfig'].get('Volumes') or {}
]
return image_volumes
except ImageNotFound:
if rebuild:
# This will force Compose to rebuild the images.
return []
raise
def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option, rebuild):
"""
Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
Anonymous volume mounts are updated in place instead.
"""
volumes = []
volumes_option = volumes_option or []
@ -1324,11 +1353,7 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o
for mount in container.get('Mounts') or {}
)
image_volumes = [
VolumeSpec.parse(volume)
for volume in
container.image_config['ContainerConfig'].get('Volumes') or {}
]
image_volumes = try_get_image_volumes(container, rebuild)
for volume in set(volumes_option + image_volumes):
# No need to preserve host volumes

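For reference, a self-contained version of the probe that try_get_image_volumes performs, written against docker-py directly rather than Compose's Container wrapper. inspect_image raises ImageNotFound for a missing image, and ContainerConfig.Volumes is the same field the diff reads; the helper name and the example client setup are illustrative assumptions, not Compose code.

from docker import APIClient
from docker.errors import ImageNotFound

def image_volumes_or_empty(client, image_ref, rebuild=False):
    # Return the volume paths declared by the image, or [] when the image is
    # gone and the caller has opted into rebuilding it from scratch.
    try:
        config = client.inspect_image(image_ref)
        volumes = (config.get('ContainerConfig') or {}).get('Volumes') or {}
        return sorted(volumes.keys())
    except ImageNotFound:
        if rebuild:
            return []   # nothing to preserve; the image will be rebuilt
        raise           # bubble up so the client can prompt the user

# Usage (assumes a reachable Docker daemon):
#   client = APIClient(base_url='unix://var/run/docker.sock')
#   image_volumes_or_empty(client, 'myapp:latest', rebuild=True)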

@ -923,7 +923,19 @@ class ServiceVolumesTest(unittest.TestCase):
VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
]
volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [], False)
assert sorted(volumes) == sorted(expected)
# Issue 5465: check for a non-existent image.
container = Container(self.mock_client, {
'Image': None,
'Mounts': []
}, has_been_inspected=True)
expected = []
volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [], False)
assert sorted(volumes) == sorted(expected)
def test_merge_volume_bindings(self):
@ -959,7 +971,7 @@ class ServiceVolumesTest(unittest.TestCase):
'existingvolume:/existing/volume:rw',
]
binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [])
binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [], False)
assert sorted(binds) == sorted(expected)
assert affinity == {'affinity:container': '=cdefab'}