mirror of https://github.com/docker/compose.git
Fix #5465 by catching a no-image exception in get_container_data_volumes

The ImageNotFound exception is now bubbled up to the client, which prompts the user for the desired course of action (rebuild or abort). Added a case to the unit test to check that an existing container with an empty image doesn't result in an exception. Closed old pull request #5466 because I did a rebase and it showed over 300 commits to merge, which was messy.

Signed-off-by: Ian Glen Neal <ian.gl.neal@gmail.com>
This commit is contained in:
parent 5d4ccafd35
commit 7591639c72
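In outline: `docker-compose up` first tries as before with `rebuild=False`; if the image backing an existing container is gone, the `ImageNotFound` error now reaches the CLI, which warns about possible data loss and asks before retrying with `rebuild=True`. Below is a self-contained toy of that prompt-and-retry pattern; the class and helpers are stand-ins for the real compose/docker-py names, and the actual patch follows in the diff.

# Toy sketch of the prompt-and-retry pattern this commit adds to `up`.
# ImageNotFound, yesno and project_up are stand-ins, not the patched code.
class ImageNotFound(Exception):
    explanation = "No such image: example/app:latest"

def yesno(prompt, default):
    answer = input(prompt + " ").strip().lower()
    return default if answer == "" else answer in ("y", "yes")

def project_up(rebuild=False):
    if not rebuild:
        raise ImageNotFound()
    return "containers started, images rebuilt"

try:
    result = project_up(rebuild=False)      # first attempt never rebuilds
except ImageNotFound as e:
    print("Image not found. Full error message:", e.explanation)
    if not yesno("Continue by rebuilding the image(s)? [yN]", False):
        raise                               # user chose to abort
    result = project_up(rebuild=True)       # retry with rebuild enabled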
compose/cli/main.py
@@ -972,11 +972,12 @@ class TopLevelCommand(object):
             raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
 
         if no_start:
-            for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
-                if options.get(excluded):
+            opts = ['-d', '--abort-on-container-exit', '--exit-code-from']
+            for excluded in [x for x in opts if options.get(x)]:
                 raise UserError('--no-start and {} cannot be combined.'.format(excluded))
 
         with up_shutdown_context(self.project, service_names, timeout, detached):
+            try:
                 to_attach = self.project.up(
                     service_names=service_names,
                     start_deps=start_deps,
@@ -988,7 +989,32 @@ class TopLevelCommand(object):
                     ignore_orphans=ignore_orphans,
                     scale_override=parse_scale_args(options['--scale']),
                     start=not no_start,
-                    always_recreate_deps=always_recreate_deps
+                    always_recreate_deps=always_recreate_deps,
+                    rebuild=False
+                )
+            except docker.errors.ImageNotFound as e:
+                log.error(("Image not found. If you continue, there is a "
+                           "risk of data loss. Consider backing up your data "
+                           "before continuing.\n\n"
+                           "Full error message: {}\n"
+                           ).format(e.explanation))
+                res = yesno("Continue by rebuilding the image(s)? [yN]", False)
+                if res is None or not res:
+                    raise e
+
+                to_attach = self.project.up(
+                    service_names=service_names,
+                    start_deps=start_deps,
+                    strategy=convergence_strategy_from_opts(options),
+                    do_build=build_action_from_opts(options),
+                    timeout=timeout,
+                    detached=detached,
+                    remove_orphans=remove_orphans,
+                    ignore_orphans=ignore_orphans,
+                    scale_override=parse_scale_args(options['--scale']),
+                    start=not no_start,
+                    always_recreate_deps=always_recreate_deps,
+                    rebuild=True
+                )
 
             if detached or no_start:
compose/parallel.py
@@ -8,6 +8,7 @@ from threading import Semaphore
 from threading import Thread
 
 from docker.errors import APIError
+from docker.errors import ImageNotFound
 from six.moves import _thread as thread
 from six.moves.queue import Empty
 from six.moves.queue import Queue
@@ -53,10 +54,7 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, pa
 
     writer = ParallelStreamWriter(stream, msg)
 
-    if parent_objects:
-        display_objects = list(parent_objects)
-    else:
-        display_objects = objects
+    display_objects = list(parent_objects) if parent_objects else objects
 
     for obj in display_objects:
         writer.add_object(get_name(obj))
@@ -76,6 +74,12 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, pa
         if exception is None:
             writer.write(get_name(obj), 'done', green)
             results.append(result)
+        elif isinstance(exception, ImageNotFound):
+            # This is to bubble up ImageNotFound exceptions to the client so we
+            # can prompt the user if they want to rebuild.
+            errors[get_name(obj)] = exception.explanation
+            writer.write(get_name(obj), 'error', red)
+            error_to_reraise = exception
         elif isinstance(exception, APIError):
             errors[get_name(obj)] = exception.explanation
             writer.write(get_name(obj), 'error', red)
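The parallel.py change matters because container recreation runs in worker threads: without the new `elif` branch, an `ImageNotFound` raised inside a worker would be reported as a generic error instead of reaching the CLI's `try/except`. A minimal sketch of that bubble-up pattern, using `concurrent.futures` in place of compose's own thread pool (an illustration of the idea, not the project's implementation):

# Minimal sketch: exceptions caught in worker threads are stashed and
# re-raised on the calling thread, so a caller's try/except can see them.
# This mirrors parallel_execute's error_to_reraise handling in spirit only.
from concurrent.futures import ThreadPoolExecutor

class ImageNotFound(Exception):
    pass

def parallel_execute_sketch(objects, func):
    error_to_reraise = None
    results = []
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(func, obj) for obj in objects]
        for future in futures:
            exc = future.exception()
            if exc is None:
                results.append(future.result())
            elif isinstance(exc, ImageNotFound):
                error_to_reraise = exc      # remember it for the caller
    if error_to_reraise is not None:
        raise error_to_reraise              # surfaces on the main thread
    return results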
compose/project.py
@@ -442,6 +442,7 @@ class Project(object):
            remove_orphans=False,
            ignore_orphans=False,
            scale_override=None,
+           rebuild=False,
            rescale=True,
            start=True,
            always_recreate_deps=False):
@@ -472,6 +473,7 @@ class Project(object):
                 timeout=timeout,
                 detached=detached,
                 scale_override=scale_override.get(service.name),
+                rebuild=rebuild,
                 rescale=rescale,
                 start=start,
                 project_services=scaled_services
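The project.py hunks above, and most of the service.py hunks below, are plumbing: the `rebuild` flag has to travel from the CLI entry point down to the volume-merging helpers where the image is actually inspected. Roughly, as read off the hunks (a summary, not code from the patch):

# Path of the `rebuild` flag through the patch, one kwarg per step:
#
# TopLevelCommand.up              passes rebuild=False, then True on retry
#   Project.up(rebuild=...)
#     Service.execute_convergence_plan(rebuild=...)
#       Service._execute_convergence_recreate(..., rebuild)
#         Service.recreate_container(rebuild=...)
#           Service._get_container_create_options(rebuild=...)
#             Service._build_container_volume_options(..., rebuild)
#               merge_volume_bindings(..., rebuild)
#                 get_container_data_volumes(..., rebuild)
#                   try_get_image_volumes(container, rebuild)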
compose/service.py
@@ -280,6 +280,7 @@ class Service(object):
                          previous_container=None,
                          number=None,
                          quiet=False,
+                         rebuild=False,
                          **override_options):
         """
         Create a container for this service. If the image doesn't exist, attempt to pull
@@ -293,6 +294,7 @@ class Service(object):
             override_options,
             number or self._next_container_number(one_off=one_off),
             one_off=one_off,
+            rebuild=rebuild,
             previous_container=previous_container,
         )
 
@@ -409,7 +411,7 @@ class Service(object):
 
         return containers
 
-    def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
+    def _execute_convergence_recreate(self, containers, scale, timeout, detached, start, rebuild):
         if scale is not None and len(containers) > scale:
             self._downscale(containers[scale:], timeout)
             containers = containers[:scale]
@@ -417,7 +419,7 @@ class Service(object):
         def recreate(container):
             return self.recreate_container(
                 container, timeout=timeout, attach_logs=not detached,
-                start_new_container=start
+                start_new_container=start, rebuild=rebuild
             )
         containers, errors = parallel_execute(
             containers,
@@ -468,7 +470,8 @@ class Service(object):
         )
 
     def execute_convergence_plan(self, plan, timeout=None, detached=False,
-                                 start=True, scale_override=None, rescale=True, project_services=None):
+                                 start=True, scale_override=None, rebuild=False,
+                                 rescale=True, project_services=None):
         (action, containers) = plan
         scale = scale_override if scale_override is not None else self.scale_num
         containers = sorted(containers, key=attrgetter('number'))
@@ -487,7 +490,7 @@ class Service(object):
 
         if action == 'recreate':
             return self._execute_convergence_recreate(
-                containers, scale, timeout, detached, start
+                containers, scale, timeout, detached, start, rebuild
             )
 
         if action == 'start':
@@ -512,6 +515,7 @@ class Service(object):
                            container,
                            timeout=None,
                            attach_logs=False,
+                           rebuild=False,
                            start_new_container=True):
         """Recreate a container.
 
@@ -526,6 +530,7 @@ class Service(object):
             previous_container=container,
             number=container.labels.get(LABEL_CONTAINER_NUMBER),
             quiet=True,
+            rebuild=rebuild
         )
         if attach_logs:
             new_container.attach_log_stream()
@@ -746,6 +751,7 @@ class Service(object):
             override_options,
             number,
             one_off=False,
+            rebuild=False,
             previous_container=None):
         add_config_hash = (not one_off and not override_options)
 
@@ -795,7 +801,7 @@ class Service(object):
                 override_options.get('labels'))
 
         container_options, override_options = self._build_container_volume_options(
-            previous_container, container_options, override_options
+            previous_container, container_options, override_options, rebuild
         )
 
         container_options['image'] = self.image_name
@@ -822,7 +828,8 @@ class Service(object):
                 container_options['environment'])
         return container_options
 
-    def _build_container_volume_options(self, previous_container, container_options, override_options):
+    def _build_container_volume_options(self, previous_container, container_options,
+                                        override_options, rebuild):
         container_volumes = []
         container_mounts = []
         if 'volumes' in container_options:
@@ -833,7 +840,7 @@ class Service(object):
 
         binds, affinity = merge_volume_bindings(
             container_volumes, self.options.get('tmpfs') or [], previous_container,
-            container_mounts
+            container_mounts, rebuild
         )
         override_options['binds'] = binds
         container_options['environment'].update(affinity)
@@ -1281,7 +1288,7 @@ def parse_repository_tag(repo_path):
 # Volumes
 
 
-def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
+def merge_volume_bindings(volumes, tmpfs, previous_container, mounts, rebuild):
     """
     Return a list of volume bindings for a container. Container data volumes
     are replaced by those from the previous container.
@@ -1297,7 +1304,7 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
 
     if previous_container:
         old_volumes, old_mounts = get_container_data_volumes(
-            previous_container, volumes, tmpfs, mounts
+            previous_container, volumes, tmpfs, mounts, rebuild
         )
         warn_on_masked_volume(volumes, old_volumes, previous_container.service)
         volume_bindings.update(
@@ -1310,12 +1317,34 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
     return list(volume_bindings.values()), affinity
 
 
-def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
+def try_get_image_volumes(container, rebuild):
+    """
+    Try to get the volumes from the existing container. If the image does
+    not exist, prompt the user to either continue (rebuild the image from
+    scratch) or raise an exception.
+    """
+    try:
+        image_volumes = [
+            VolumeSpec.parse(volume)
+            for volume in
+            container.image_config['ContainerConfig'].get('Volumes') or {}
+        ]
+        return image_volumes
+    except ImageNotFound:
+        if rebuild:
+            # This will force Compose to rebuild the images.
+            return []
+        raise
+
+
+def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option, rebuild):
     """
     Find the container data volumes that are in `volumes_option`, and return
     a mapping of volume bindings for those volumes.
     Anonymous volume mounts are updated in place instead.
     """
 
     volumes = []
     volumes_option = volumes_option or []
 
@@ -1324,11 +1353,7 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o
         for mount in container.get('Mounts') or {}
     )
 
-    image_volumes = [
-        VolumeSpec.parse(volume)
-        for volume in
-        container.image_config['ContainerConfig'].get('Volumes') or {}
-    ]
+    image_volumes = try_get_image_volumes(container, rebuild)
 
     for volume in set(volumes_option + image_volumes):
         # No need to preserve host volumes
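The new helper's contract, then: with `rebuild=False` a missing image re-raises `ImageNotFound` so the client can prompt the user; with `rebuild=True` it returns an empty volume list so convergence can proceed and rebuild the image. A runnable stand-in demonstrating the two paths (the container class and exception here are mocks, not compose objects):

# Stand-in demonstrating try_get_image_volumes' two paths on a missing
# image. GoneImageContainer and ImageNotFound are mocks for illustration.
class ImageNotFound(Exception):
    pass

class GoneImageContainer:
    @property
    def image_config(self):
        raise ImageNotFound("image was removed out from under the container")

def try_get_image_volumes_sketch(container, rebuild):
    try:
        return list(container.image_config['ContainerConfig'].get('Volumes') or {})
    except ImageNotFound:
        if rebuild:
            return []   # degrade gracefully; the image will be rebuilt
        raise           # bubble up so the client can prompt the user

assert try_get_image_volumes_sketch(GoneImageContainer(), rebuild=True) == []
# The same call with rebuild=False re-raises ImageNotFound instead.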
tests/unit/service_test.py
@@ -923,7 +923,19 @@ class ServiceVolumesTest(unittest.TestCase):
             VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
         ]
 
-        volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
+        volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [], False)
+        assert sorted(volumes) == sorted(expected)
+
+        # Issue 5465, check for non-existant image.
+
+        container = Container(self.mock_client, {
+            'Image': None,
+            'Mounts': []
+        }, has_been_inspected=True)
+
+        expected = []
+
+        volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [], False)
         assert sorted(volumes) == sorted(expected)
 
     def test_merge_volume_bindings(self):
@@ -959,7 +971,7 @@ class ServiceVolumesTest(unittest.TestCase):
             'existingvolume:/existing/volume:rw',
         ]
 
-        binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [])
+        binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [], False)
         assert sorted(binds) == sorted(expected)
         assert affinity == {'affinity:container': '=cdefab'}