mirror of https://github.com/docker/compose.git
Improve ImageNotFound handling in convergence

- Improved CLI prompt
- Attempt volume preservation with new image config
- Fix container.rename_to_tmp_name()
- Integration test replicates behavior; remove obsolete unit test

Signed-off-by: Joffrey F <joffrey@docker.com>

This commit is contained in:
parent 4042121f6e
commit 9ba9016cbc

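Taken together, the hunks below thread a single flag from the CLI down to the individual containers. A rough sketch of the resulting flow, paraphrased from the diff (control flow condensed, names as they appear in the hunks):

    # Sketch only: condensed from the cli/main.py hunks below.
    try:
        to_attach = up(False)                      # normal `docker-compose up`
    except docker.errors.ImageNotFound as e:
        # The image backing an existing container is gone; ask before recreating.
        if not yesno("Continue with the new image? [yN]", False):
            raise e
        to_attach = up(True)                       # Project.up(reset_container_image=True)
        # -> Service.execute_convergence_plan(..., reset_container_image=True)
        # -> Container.reset_image(new_image_id) for containers whose image was removed
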
@@ -976,12 +976,14 @@ class TopLevelCommand(object):
raise UserError('--no-start and {} cannot be combined.'.format(excluded))

with up_shutdown_context(self.project, service_names, timeout, detached):
warn_for_swarm_mode(self.project.client)

def up(rebuild):
return self.project.up(
service_names=service_names,
start_deps=start_deps,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options) if not rebuild else BuildAction.force,
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
remove_orphans=remove_orphans,

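In the hunk above, the retry path no longer forces a rebuild: the old do_build=... if not rebuild else BuildAction.force argument is replaced by a plain build_action_from_opts(options) call, and the rebuild flag is instead forwarded as reset_container_image in the next hunk. The hunk also adds the warn_for_swarm_mode(self.project.client) call whose definition is appended to this module further down. A condensed before/after sketch, not verbatim from either revision:

    # Before: a retry after ImageNotFound forced a full image rebuild.
    do_build = BuildAction.force if rebuild else build_action_from_opts(options)

    # After: the build behaviour is left alone; the retry only asks the convergence code
    # to reset the stored image ID on stale containers so their volumes can be recovered.
    do_build = build_action_from_opts(options)
    reset_container_image = rebuild
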
@@ -989,17 +991,18 @@
scale_override=parse_scale_args(options['--scale']),
start=not no_start,
always_recreate_deps=always_recreate_deps,
reset_container_image=rebuild,
)

try:
to_attach = up(False)
except docker.errors.ImageNotFound as e:
log.error(("Image not found. If you continue, there is a "
"risk of data loss. Consider backing up your data "
"before continuing.\n\n"
"Full error message: {}\n"
).format(e.explanation))
res = yesno("Continue by rebuilding the image(s)? [yN]", False)
log.error(
"The image for the service you're trying to recreate has been removed. "
"If you continue, volume data could be lost. Consider backing up your data "
"before continuing.\n".format(e.explanation)
)
res = yesno("Continue with the new image? [yN]", False)
if res is None or not res:
raise e

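This hunk replaces both the error text and the prompt: the generic "Image not found ... risk of data loss" message and the "Continue by rebuilding the image(s)?" question give way to a message about the removed service image and a "Continue with the new image?" prompt. Note that the new message keeps a trailing .format(e.explanation) without a {} placeholder, so the server's explanation is no longer interpolated into the output. A minimal sketch of the prompt handling, assuming yesno() returns True/False for an explicit answer, the supplied default (False here) on a blank line, and None on unrecognised input:

    # Sketch of the prompt contract; the retry call itself sits just after the lines shown above.
    res = yesno("Continue with the new image? [yN]", False)
    if res is None or not res:        # anything other than an explicit "yes" aborts
        raise e
    to_attach = up(True)              # assumed retry with reset_container_image=True
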
@@ -1426,3 +1429,19 @@ def build_filter(arg):
key, val = arg.split('=', 1)
filt[key] = val
return filt


def warn_for_swarm_mode(client):
info = client.info()
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
if info.get('ServerVersion', '').startswith('ucp'):
# UCP does multi-node scheduling with traditional Compose files.
return

log.warn(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use `docker stack deploy`.\n"
)

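The last main.py hunk appends warn_for_swarm_mode() to this module; the identical function and its call site are removed from the Project module further down, so the swarm-mode warning appears to move from Project.up() into the CLI layer. A usage sketch mirroring the unit test added near the end of this diff (unittest.mock is used here instead of compose's tests.mock helper):

    import docker
    from unittest import mock
    from compose.cli.main import warn_for_swarm_mode

    # No daemon needed: the check only reads client.info().
    mock_client = mock.create_autospec(docker.APIClient)
    mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
    warn_for_swarm_mode(mock_client)   # emits the warning once via log.warn
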
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
from functools import reduce

import six
from docker.errors import ImageNotFound

from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_PROJECT

@@ -66,15 +67,17 @@ class Container(object):
def name(self):
return self.dictionary['Name'][1:]

@property
def project(self):
return self.labels.get(LABEL_PROJECT)

@property
def service(self):
return self.labels.get(LABEL_SERVICE)

@property
def name_without_project(self):
project = self.labels.get(LABEL_PROJECT)

if self.name.startswith('{0}_{1}'.format(project, self.service)):
if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
return '{0}_{1}'.format(self.service, self.number)
else:
return self.name

@@ -230,10 +233,10 @@ class Container(object):
"""Rename the container to a hopefully unique temporary container name
by prepending the short id.
"""
self.client.rename(
self.id,
'%s_%s' % (self.short_id, self.name)
)
if not self.name.startswith(self.short_id):
self.client.rename(
self.id, '{0}_{1}'.format(self.short_id, self.name)
)

def inspect_if_not_inspected(self):
if not self.has_been_inspected:

@@ -250,6 +253,21 @@ class Container(object):
self.has_been_inspected = True
return self.dictionary

def image_exists(self):
try:
self.client.inspect_image(self.image)
except ImageNotFound:
return False

return True

def reset_image(self, img_id):
""" If this container's image has been removed, temporarily replace the old image ID
with `img_id`.
"""
if not self.image_exists():
self.dictionary['Image'] = img_id

def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)

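In container.py, the new image_exists() / reset_image() pair lets callers point a container at a replacement image only when its original image is gone, and rename_to_tmp_name() now skips the rename when the name already starts with the short id, so renaming an already renamed container no longer stacks prefixes. A minimal usage sketch, assuming a low-level docker APIClient, a container id, and the id of the newly built image:

    from compose.container import Container

    details = client.inspect_container(container_id)
    ctnr = Container(client, details, has_been_inspected=True)
    if not ctnr.image_exists():        # inspect_image() raised ImageNotFound
        ctnr.reset_image(new_image_id) # e.g. service.image()['Id'], as service.py does below
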
@@ -444,9 +444,8 @@ class Project(object):
scale_override=None,
rescale=True,
start=True,
always_recreate_deps=False):

warn_for_swarm_mode(self.client)
always_recreate_deps=False,
reset_container_image=False):

self.initialize()
if not ignore_orphans:

@@ -474,7 +473,8 @@ class Project(object):
scale_override=scale_override.get(service.name),
rescale=rescale,
start=start,
project_services=scaled_services
project_services=scaled_services,
reset_container_image=reset_container_image
)

def get_deps(service):

@@ -686,22 +686,6 @@ def get_secrets(service, service_secrets, secret_defs):
return secrets


def warn_for_swarm_mode(client):
info = client.info()
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
if info.get('ServerVersion', '').startswith('ucp'):
# UCP does multi-node scheduling with traditional Compose files.
return

log.warn(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use `docker stack deploy`.\n"
)


class NoSuchService(Exception):
def __init__(self, name):
if isinstance(name, six.binary_type):

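Project.up() drops its own warn_for_swarm_mode(self.client) call (the warning now lives in the CLI) and gains a reset_container_image keyword that it forwards to each service's execute_convergence_plan(). A hypothetical retry call, roughly what the CLI ends up issuing after the user confirms:

    # Hypothetical call; the service name is illustrative.
    project.up(
        service_names=['db'],
        reset_container_image=True,   # new in this commit, defaults to False
    )
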
@@ -469,7 +469,8 @@ class Service(object):

def execute_convergence_plan(self, plan, timeout=None, detached=False,
start=True, scale_override=None,
rescale=True, project_services=None):
rescale=True, project_services=None,
reset_container_image=False):
(action, containers) = plan
scale = scale_override if scale_override is not None else self.scale_num
containers = sorted(containers, key=attrgetter('number'))

@@ -487,6 +488,12 @@ class Service(object):
scale = None

if action == 'recreate':
if reset_container_image:
# Updating the image ID on the container object lets us recover old volumes if
# the new image uses them as well
img_id = self.image()['Id']
for c in containers:
c.reset_image(img_id)
return self._execute_convergence_recreate(
containers, scale, timeout, detached, start
)

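In the recreate branch, execute_convergence_plan() now optionally rewrites each old container's recorded image ID to the freshly built or pulled image before recreating it. Per the comment in the hunk, this is about volume recovery: recreation copies anonymous volumes over from the old container, and that step needs to inspect the old container's image, which may no longer exist. A simplified paraphrase of why the reset matters (the real logic lives in compose.service.get_container_data_volumes):

    # Paraphrased, simplified volume-copy step during recreate:
    image_config = client.inspect_image(old_container.image)   # ImageNotFound if the image was removed
    image_volumes = list((image_config['ContainerConfig'].get('Volumes') or {}).keys())
    # After reset_image(), old_container.image is the new image's ID, so the inspect succeeds
    # and any volumes shared by the old and new image are carried over to the new container.
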
@@ -10,6 +10,7 @@ from os import path

import pytest
from docker.errors import APIError
from docker.errors import ImageNotFound
from six import StringIO
from six import text_type

@@ -659,6 +660,35 @@ class ServiceTest(DockerClientTestCase):
assert len(service_containers) == 1
assert not service_containers[0].is_running

def test_execute_convergence_plan_image_with_volume_is_removed(self):
service = self.create_service(
'db', build={'context': 'tests/fixtures/dockerfile-with-volume'}
)

old_container = create_and_start_container(service)
assert (
[mount['Destination'] for mount in old_container.get('Mounts')] ==
['/data']
)
volume_path = old_container.get_mount('/data')['Source']

old_container.stop()
self.client.remove_image(service.image(), force=True)

service.ensure_image_exists()
with pytest.raises(ImageNotFound):
service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container])
)
old_container.inspect()  # retrieve new name from server

new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]),
reset_container_image=True
)
assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
assert new_container.get_mount('/data')['Source'] == volume_path

def test_start_container_passes_through_options(self):
db = self.create_service('db')
create_and_start_container(db, environment={'FOO': 'BAR'})

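The integration test builds a service from tests/fixtures/dockerfile-with-volume (judging by the assertions, a fixture declaring a single anonymous volume at /data), removes the image from under an existing container, and checks that a plain recreate raises ImageNotFound while a recreate with reset_container_image=True keeps the same volume source path. It needs a real Docker daemon; something like the following should run just this case:

    # Assumes the compose repository layout and a reachable Docker daemon.
    import pytest
    pytest.main([
        "tests/integration/service_test.py::ServiceTest"
        "::test_execute_convergence_plan_image_with_volume_is_removed",
        "-x",
    ])
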
@@ -3,6 +3,7 @@ from __future__ import unicode_literals

import logging

import docker
import pytest

from compose import container

@@ -11,6 +12,7 @@ from compose.cli.formatter import ConsoleWarningFormatter
from compose.cli.main import convergence_strategy_from_opts
from compose.cli.main import filter_containers_to_service_names
from compose.cli.main import setup_console_handler
from compose.cli.main import warn_for_swarm_mode
from compose.service import ConvergenceStrategy
from tests import mock

@@ -54,6 +56,14 @@ class TestCLIMainTestCase(object):
actual = filter_containers_to_service_names(containers, service_names)
assert actual == containers

def test_warning_in_swarm_mode(self):
mock_client = mock.create_autospec(docker.APIClient)
mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}

with mock.patch('compose.cli.main.log') as fake_log:
warn_for_swarm_mode(mock_client)
assert fake_log.warn.call_count == 1


class TestSetupConsoleHandlerTestCase(object):

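This new CLI-level test replaces the project-level test_warning_in_swarm_mode deleted in the next hunk, matching the move of warn_for_swarm_mode() into compose.cli.main. The negative case could be covered the same way; a sketch using the same imports as the test above (this case is assumed, not part of the diff):

    def test_no_warning_when_swarm_inactive(self):
        mock_client = mock.create_autospec(docker.APIClient)
        mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'inactive'}}

        with mock.patch('compose.cli.main.log') as fake_log:
            warn_for_swarm_mode(mock_client)
            assert fake_log.warn.call_count == 0
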
@@ -533,14 +533,6 @@ class ProjectTest(unittest.TestCase):
project.down(ImageType.all, True)
self.mock_client.remove_image.assert_called_once_with("busybox:latest")

def test_warning_in_swarm_mode(self):
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
project = Project('composetest', [], self.mock_client)

with mock.patch('compose.project.log') as fake_log:
project.up()
assert fake_log.warn.call_count == 1

def test_no_warning_on_stop(self):
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
project = Project('composetest', [], self.mock_client)

@@ -5,7 +5,6 @@ import docker
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import APIError
from docker.errors import ImageNotFound

from .. import mock
from .. import unittest

@@ -927,33 +926,6 @@ class ServiceVolumesTest(unittest.TestCase):
volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
assert sorted(volumes) == sorted(expected)

def test_get_container_data_volumes_image_does_not_exist(self):
# Issue 5465, check for non-existant image.
options = [VolumeSpec.parse(v) for v in [
'/host/volume:/host/volume:ro',
'/new/volume',
'/existing/volume',
'named:/named/vol',
'/dev/tmpfs'
]]

def inspect_fn(image):
if image == 'shaDOES_NOT_EXIST':
raise ImageNotFound("inspect_fn: {}".format(image))
return {'ContainerConfig': None}

self.mock_client.inspect_image = inspect_fn

container = Container(self.mock_client, {
'Image': 'shaDOES_NOT_EXIST',
'Mounts': []
}, has_been_inspected=True)

expected = []

volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
assert sorted(volumes) == sorted(expected)

def test_merge_volume_bindings(self):
options = [
VolumeSpec.parse(v, True) for v in [

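The deleted ServiceVolumesTest case encoded the previous behaviour for issue 5465: when a container's image could not be inspected, get_container_data_volumes() quietly returned no volumes to carry over. With this commit the missing image surfaces as ImageNotFound and the CLI prompts instead, and the volume-preservation path is exercised by the integration test added above, so the unit test looks obsolete rather than merely relocated. The new contract, condensed from that integration test:

    # Condensed from the integration test above (a real daemon is required to run it).
    with pytest.raises(ImageNotFound):
        service.execute_convergence_plan(ConvergencePlan('recreate', [old_container]))

    new_container, = service.execute_convergence_plan(
        ConvergencePlan('recreate', [old_container]),
        reset_container_image=True,    # anonymous volume data from the old container survives
    )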