From 2d986dff79f6d8a63f66ad46e9c231e0d7910165 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Thu, 11 Jan 2018 16:37:09 -0800 Subject: [PATCH] Remove rebuild parameter ; set do_build instead Reduce repetition in main.py Signed-off-by: Joffrey F --- compose/cli/main.py | 25 ++++++---------------- compose/project.py | 2 -- compose/service.py | 44 +++++++++++++------------------------- tests/unit/service_test.py | 14 +++++++++--- 4 files changed, 33 insertions(+), 52 deletions(-) diff --git a/compose/cli/main.py b/compose/cli/main.py index bc10c5c4f..14f7a745c 100644 --- a/compose/cli/main.py +++ b/compose/cli/main.py @@ -977,12 +977,12 @@ class TopLevelCommand(object): raise UserError('--no-start and {} cannot be combined.'.format(excluded)) with up_shutdown_context(self.project, service_names, timeout, detached): - try: - to_attach = self.project.up( + def up(rebuild): + return self.project.up( service_names=service_names, start_deps=start_deps, strategy=convergence_strategy_from_opts(options), - do_build=build_action_from_opts(options), + do_build=build_action_from_opts(options) if not rebuild else BuildAction.force, timeout=timeout, detached=detached, remove_orphans=remove_orphans, @@ -990,8 +990,10 @@ class TopLevelCommand(object): scale_override=parse_scale_args(options['--scale']), start=not no_start, always_recreate_deps=always_recreate_deps, - rebuild=False ) + + try: + to_attach = up(False) except docker.errors.ImageNotFound as e: log.error(("Image not found. If you continue, there is a " "risk of data loss. 
Consider backing up your data " @@ -1002,20 +1004,7 @@ class TopLevelCommand(object): if res is None or not res: raise e - to_attach = self.project.up( - service_names=service_names, - start_deps=start_deps, - strategy=convergence_strategy_from_opts(options), - do_build=build_action_from_opts(options), - timeout=timeout, - detached=detached, - remove_orphans=remove_orphans, - ignore_orphans=ignore_orphans, - scale_override=parse_scale_args(options['--scale']), - start=not no_start, - always_recreate_deps=always_recreate_deps, - rebuild=True - ) + to_attach = up(True) if detached or no_start: return diff --git a/compose/project.py b/compose/project.py index f69aaa7d7..8e1c6a14d 100644 --- a/compose/project.py +++ b/compose/project.py @@ -442,7 +442,6 @@ class Project(object): remove_orphans=False, ignore_orphans=False, scale_override=None, - rebuild=False, rescale=True, start=True, always_recreate_deps=False): @@ -473,7 +472,6 @@ class Project(object): timeout=timeout, detached=detached, scale_override=scale_override.get(service.name), - rebuild=rebuild, rescale=rescale, start=start, project_services=scaled_services diff --git a/compose/service.py b/compose/service.py index 37bd2ca2f..195257c39 100644 --- a/compose/service.py +++ b/compose/service.py @@ -280,7 +280,6 @@ class Service(object): previous_container=None, number=None, quiet=False, - rebuild=False, **override_options): """ Create a container for this service. 
If the image doesn't exist, attempt to pull @@ -294,7 +293,6 @@ class Service(object): override_options, number or self._next_container_number(one_off=one_off), one_off=one_off, - rebuild=rebuild, previous_container=previous_container, ) @@ -411,7 +409,7 @@ class Service(object): return containers - def _execute_convergence_recreate(self, containers, scale, timeout, detached, start, rebuild): + def _execute_convergence_recreate(self, containers, scale, timeout, detached, start): if scale is not None and len(containers) > scale: self._downscale(containers[scale:], timeout) containers = containers[:scale] @@ -419,7 +417,7 @@ class Service(object): def recreate(container): return self.recreate_container( container, timeout=timeout, attach_logs=not detached, - start_new_container=start, rebuild=rebuild + start_new_container=start ) containers, errors = parallel_execute( containers, @@ -470,7 +468,7 @@ class Service(object): ) def execute_convergence_plan(self, plan, timeout=None, detached=False, - start=True, scale_override=None, rebuild=False, + start=True, scale_override=None, rescale=True, project_services=None): (action, containers) = plan scale = scale_override if scale_override is not None else self.scale_num @@ -490,7 +488,7 @@ class Service(object): if action == 'recreate': return self._execute_convergence_recreate( - containers, scale, timeout, detached, start, rebuild + containers, scale, timeout, detached, start ) if action == 'start': @@ -510,13 +508,7 @@ class Service(object): raise Exception("Invalid action: {}".format(action)) - def recreate_container( - self, - container, - timeout=None, - attach_logs=False, - rebuild=False, - start_new_container=True): + def recreate_container(self, container, timeout=None, attach_logs=False, start_new_container=True): """Recreate a container. 
The original container is renamed to a temporary name so that data @@ -530,7 +522,6 @@ class Service(object): previous_container=container, number=container.labels.get(LABEL_CONTAINER_NUMBER), quiet=True, - rebuild=rebuild ) if attach_logs: new_container.attach_log_stream() @@ -751,7 +742,6 @@ class Service(object): override_options, number, one_off=False, - rebuild=False, previous_container=None): add_config_hash = (not one_off and not override_options) @@ -801,7 +791,7 @@ class Service(object): override_options.get('labels')) container_options, override_options = self._build_container_volume_options( - previous_container, container_options, override_options, rebuild + previous_container, container_options, override_options ) container_options['image'] = self.image_name @@ -828,8 +818,7 @@ class Service(object): container_options['environment']) return container_options - def _build_container_volume_options(self, previous_container, container_options, - override_options, rebuild): + def _build_container_volume_options(self, previous_container, container_options, override_options): container_volumes = [] container_mounts = [] if 'volumes' in container_options: @@ -840,7 +829,7 @@ class Service(object): binds, affinity = merge_volume_bindings( container_volumes, self.options.get('tmpfs') or [], previous_container, - container_mounts, rebuild + container_mounts ) override_options['binds'] = binds container_options['environment'].update(affinity) @@ -1288,7 +1277,7 @@ def parse_repository_tag(repo_path): # Volumes -def merge_volume_bindings(volumes, tmpfs, previous_container, mounts, rebuild): +def merge_volume_bindings(volumes, tmpfs, previous_container, mounts): """ Return a list of volume bindings for a container. Container data volumes are replaced by those from the previous container. 
@@ -1304,7 +1293,7 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts, rebuild): if previous_container: old_volumes, old_mounts = get_container_data_volumes( - previous_container, volumes, tmpfs, mounts, rebuild + previous_container, volumes, tmpfs, mounts ) warn_on_masked_volume(volumes, old_volumes, previous_container.service) volume_bindings.update( @@ -1317,11 +1306,11 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts, rebuild): return list(volume_bindings.values()), affinity -def try_get_image_volumes(container, rebuild): +def try_get_image_volumes(container): """ Try to get the volumes from the existing container. If the image does - not exist, prompt the user to either continue (rebuild the image from - scratch) or raise an exception. + not exist, raise an exception that will be caught at the CLI level to + prompt user for a rebuild. """ try: @@ -1332,13 +1321,10 @@ def try_get_image_volumes(container, rebuild): ] return image_volumes except ImageNotFound: - if rebuild: - # This will force Compose to rebuild the images. - return [] raise -def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option, rebuild): +def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option): """ Find the container data volumes that are in `volumes_option`, and return a mapping of volume bindings for those volumes. 
@@ -1353,7 +1339,7 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o for mount in container.get('Mounts') or {} ) - image_volumes = try_get_image_volumes(container, rebuild) + image_volumes = try_get_image_volumes(container) for volume in set(volumes_option + image_volumes): # No need to preserve host volumes diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py index 44f14e58b..ef250a4d7 100644 --- a/tests/unit/service_test.py +++ b/tests/unit/service_test.py @@ -923,10 +923,18 @@ class ServiceVolumesTest(unittest.TestCase): VolumeSpec.parse('imagedata:/mnt/image/data:rw'), ] - volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [], False) + volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], []) assert sorted(volumes) == sorted(expected) + def test_get_container_data_volumes_image_is_none(self): # Issue 5465, check for non-existent image. + options = [VolumeSpec.parse(v) for v in [ + '/host/volume:/host/volume:ro', + '/new/volume', + '/existing/volume', + 'named:/named/vol', + '/dev/tmpfs' + ]] container = Container(self.mock_client, { 'Image': None, @@ -935,7 +943,7 @@ class ServiceVolumesTest(unittest.TestCase): expected = [] - volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [], False) + volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], []) assert sorted(volumes) == sorted(expected) def test_merge_volume_bindings(self): @@ -971,7 +979,7 @@ class ServiceVolumesTest(unittest.TestCase): 'existingvolume:/existing/volume:rw', ] - binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [], False) + binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, []) assert sorted(binds) == sorted(expected) assert affinity == {'affinity:container': '=cdefab'}