mirror of https://github.com/docker/compose.git
Merge pull request #1702 from aanand/smart-recreate-by-default
Smart recreate by default
commit c8643828d2
@@ -429,17 +429,22 @@ class TopLevelCommand(Command):
     def up(self, project, options):
         """
-        Build, (re)create, start and attach to containers for a service.
+        Builds, (re)creates, starts, and attaches to containers for a service.
 
-        By default, `docker-compose up` will aggregate the output of each container, and
-        when it exits, all containers will be stopped. If you run `docker-compose up -d`,
-        it'll start the containers in the background and leave them running.
+        Unless they are already running, this command also starts any linked services.
 
-        If there are existing containers for a service, `docker-compose up` will stop
-        and recreate them (preserving mounted volumes with volumes-from),
-        so that changes in `docker-compose.yml` are picked up. If you do not want existing
-        containers to be recreated, `docker-compose up --no-recreate` will re-use existing
-        containers.
+        The `docker-compose up` command aggregates the output of each container. When
+        the command exits, all containers are stopped. Running `docker-compose up -d`
+        starts the containers in the background and leaves them running.
+
+        If there are existing containers for a service, and the service's configuration
+        or image was changed after the container's creation, `docker-compose up` picks
+        up the changes by stopping and recreating the containers (preserving mounted
+        volumes). To prevent Compose from picking up changes, use the `--no-recreate`
+        flag.
+
+        If you want to force Compose to stop and recreate all containers, use the
+        `--force-recreate` flag.
 
         Usage: up [options] [SERVICE...]
@@ -450,9 +455,10 @@ class TopLevelCommand(Command):
                                    print new container names.
             --no-color             Produce monochrome output.
             --no-deps              Don't start linked services.
-            --x-smart-recreate     Only recreate containers whose configuration or
-                                   image needs to be updated. (EXPERIMENTAL)
+            --force-recreate       Recreate containers even if their configuration and
+                                   image haven't changed. Incompatible with --no-recreate.
             --no-recreate          If containers already exist, don't recreate them.
+                                   Incompatible with --force-recreate.
             --no-build             Don't build an image, even if it's missing
             -t, --timeout TIMEOUT  Use this timeout in seconds for container shutdown
                                    when attached or when containers are already
@@ -465,15 +471,18 @@ class TopLevelCommand(Command):
         start_deps = not options['--no-deps']
         allow_recreate = not options['--no-recreate']
-        smart_recreate = options['--x-smart-recreate']
+        force_recreate = options['--force-recreate']
         service_names = options['SERVICE']
         timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
 
+        if force_recreate and not allow_recreate:
+            raise UserError("--force-recreate and --no-recreate cannot be combined.")
+
         to_attach = project.up(
             service_names=service_names,
             start_deps=start_deps,
             allow_recreate=allow_recreate,
-            smart_recreate=smart_recreate,
+            force_recreate=force_recreate,
             insecure_registry=insecure_registry,
             do_build=not options['--no-build'],
             timeout=timeout
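For reference, a minimal sketch (not part of the change) of how the two flags are meant to interact, assuming a docopt-style options dict shaped like the one used above: `--no-recreate` disables recreation entirely, `--force-recreate` recreates unconditionally, and combining them is rejected up front.

# Minimal sketch, assuming a docopt-style options dict as in the `up` command above.
def recreate_options(options):
    allow_recreate = not options['--no-recreate']
    force_recreate = options['--force-recreate']
    if force_recreate and not allow_recreate:
        # mirrors the UserError raised by `up` when both flags are passed
        raise ValueError("--force-recreate and --no-recreate cannot be combined.")
    return allow_recreate, force_recreate

# e.g. recreate_options({'--no-recreate': False, '--force-recreate': True}) -> (True, True)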
@@ -223,11 +223,14 @@ class Project(object):
            service_names=None,
            start_deps=True,
            allow_recreate=True,
-           smart_recreate=False,
+           force_recreate=False,
            insecure_registry=False,
            do_build=True,
            timeout=DEFAULT_TIMEOUT):
 
+        if force_recreate and not allow_recreate:
+            raise ValueError("force_recreate and allow_recreate are in conflict")
+
         services = self.get_services(service_names, include_deps=start_deps)
 
         for service in services:
@@ -236,7 +239,7 @@ class Project(object):
         plans = self._get_convergence_plans(
             services,
             allow_recreate=allow_recreate,
-            smart_recreate=smart_recreate,
+            force_recreate=force_recreate,
         )
 
         return [
@@ -253,7 +256,7 @@ class Project(object):
    def _get_convergence_plans(self,
                               services,
                               allow_recreate=True,
-                              smart_recreate=False):
+                              force_recreate=False):
 
        plans = {}
@@ -265,19 +268,19 @@ class Project(object):
                 and plans[name].action == 'recreate'
             ]
 
-            if updated_dependencies:
+            if updated_dependencies and allow_recreate:
                 log.debug(
                     '%s has upstream changes (%s)',
                     service.name, ", ".join(updated_dependencies),
                 )
                 plan = service.convergence_plan(
                     allow_recreate=allow_recreate,
-                    smart_recreate=False,
+                    force_recreate=True,
                 )
             else:
                 plan = service.convergence_plan(
                     allow_recreate=allow_recreate,
-                    smart_recreate=smart_recreate,
+                    force_recreate=force_recreate,
                 )
 
             plans[service.name] = plan
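The intent of the guard added here: a dependency that is going to be recreated forces its dependents to be recreated as well, but only when recreation is allowed at all, so `--no-recreate` still wins. A standalone sketch of that cascade (hypothetical helper, simplified to plain strings instead of the real plan objects):

# Hypothetical, simplified sketch of the dependency cascade above.
# plans maps service name -> action ('create' | 'recreate' | 'start' | 'noop').
def should_force_recreate(service_name, dependencies, plans, allow_recreate=True):
    updated = [name for name in dependencies
               if name in plans and plans[name] == 'recreate']
    # Only cascade a recreate when recreation is allowed (--no-recreate wins).
    return bool(updated) and allow_recreate

plans = {'db': 'recreate'}
assert should_force_recreate('web', ['db'], plans) is True
assert should_force_recreate('web', ['db'], plans, allow_recreate=False) is False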
@@ -264,25 +264,28 @@ class Service(object):
 
     def convergence_plan(self,
                          allow_recreate=True,
-                         smart_recreate=False):
+                         force_recreate=False):
 
+        if force_recreate and not allow_recreate:
+            raise ValueError("force_recreate and allow_recreate are in conflict")
+
         containers = self.containers(stopped=True)
 
         if not containers:
             return ConvergencePlan('create', [])
 
-        if smart_recreate and not self._containers_have_diverged(containers):
-            stopped = [c for c in containers if not c.is_running]
-
-            if stopped:
-                return ConvergencePlan('start', stopped)
-
-            return ConvergencePlan('noop', containers)
-
         if not allow_recreate:
             return ConvergencePlan('start', containers)
 
-        return ConvergencePlan('recreate', containers)
+        if force_recreate or self._containers_have_diverged(containers):
+            return ConvergencePlan('recreate', containers)
+
+        stopped = [c for c in containers if not c.is_running]
+
+        if stopped:
+            return ConvergencePlan('start', stopped)
+
+        return ConvergencePlan('noop', containers)
 
     def _containers_have_diverged(self, containers):
         config_hash = None
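The decision order after this change, written out as a standalone sketch (a hypothetical pure function, not the real method; the boolean arguments stand in for the container queries that `convergence_plan` performs against the Docker API):

# Hypothetical sketch of the plan selection order in convergence_plan above.
def pick_action(has_containers, allow_recreate, force_recreate, diverged, has_stopped):
    if not has_containers:
        return 'create'
    if not allow_recreate:           # --no-recreate: never tear containers down
        return 'start'
    if force_recreate or diverged:   # --force-recreate, or config/image changed
        return 'recreate'
    if has_stopped:                  # nothing changed, just start stopped ones
        return 'start'
    return 'noop'

assert pick_action(True, True, False, False, False) == 'noop'
assert pick_action(True, True, False, True, False) == 'recreate'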
docs/cli.md
@@ -127,15 +127,20 @@ Stops running containers without removing them. They can be started again with
 
 Builds, (re)creates, starts, and attaches to containers for a service.
 
-Linked services will be started, unless they are already running.
+Unless they are already running, this command also starts any linked services.
 
-By default, `docker-compose up` will aggregate the output of each container and,
-when it exits, all containers will be stopped. Running `docker-compose up -d`,
-will start the containers in the background and leave them running.
+The `docker-compose up` command aggregates the output of each container. When
+the command exits, all containers are stopped. Running `docker-compose up -d`
+starts the containers in the background and leaves them running.
 
-By default, if there are existing containers for a service, `docker-compose up` will stop and recreate them (preserving mounted volumes with [volumes-from]), so that changes in `docker-compose.yml` are picked up. If you do not want containers stopped and recreated, use `docker-compose up --no-recreate`. This will still start any stopped containers, if needed.
+If there are existing containers for a service, and the service's configuration
+or image was changed after the container's creation, `docker-compose up` picks
+up the changes by stopping and recreating the containers (preserving mounted
+volumes). To prevent Compose from picking up changes, use the `--no-recreate`
+flag.
 
-[volumes-from]: http://docs.docker.io/en/latest/use/working_with_volumes/
+If you want to force Compose to stop and recreate all containers, use the
+`--force-recreate` flag.
 
 ## Options
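A sketch of how the three behaviours documented above map onto the Python API changed in this pull request; `make_project` is a hypothetical stand-in for however the `Project` gets constructed (the integration tests below use `Project('composetest', [web, db], self.client)`):

# Hedged sketch only, not part of the change.
def demo(make_project):
    project = make_project()
    project.up()                      # default: recreate only services whose config or image changed
    project.up(allow_recreate=False)  # --no-recreate: reuse and start existing containers as-is
    project.up(force_recreate=True)   # --force-recreate: stop and recreate everything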
@@ -9,6 +9,7 @@ from mock import patch
 
 from .testcases import DockerClientTestCase
 from compose.cli.main import TopLevelCommand
+from compose.cli.errors import UserError
 from compose.project import NoSuchService
@@ -136,21 +137,21 @@ class CLITestCase(DockerClientTestCase):
         self.assertEqual(len(db.containers()), 0)
         self.assertEqual(len(console.containers()), 0)
 
-    def test_up_with_recreate(self):
+    def test_up_with_force_recreate(self):
         self.command.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
         self.assertEqual(len(service.containers()), 1)
 
         old_ids = [c.id for c in service.containers()]
 
-        self.command.dispatch(['up', '-d'], None)
+        self.command.dispatch(['up', '-d', '--force-recreate'], None)
         self.assertEqual(len(service.containers()), 1)
 
         new_ids = [c.id for c in service.containers()]
 
         self.assertNotEqual(old_ids, new_ids)
 
-    def test_up_with_keep_old(self):
+    def test_up_with_no_recreate(self):
         self.command.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
         self.assertEqual(len(service.containers()), 1)
@@ -164,6 +165,10 @@ class CLITestCase(DockerClientTestCase):
 
         self.assertEqual(old_ids, new_ids)
 
+    def test_up_with_force_recreate_and_no_recreate(self):
+        with self.assertRaises(UserError):
+            self.command.dispatch(['up', '-d', '--force-recreate', '--no-recreate'], None)
+
     def test_up_with_timeout(self):
         self.command.dispatch(['up', '-d', '-t', '1'], None)
         service = self.project.get_service('simple')
@@ -197,7 +197,7 @@ class ProjectTest(DockerClientTestCase):
         self.assertEqual(len(db.containers()), 1)
         self.assertEqual(len(web.containers()), 1)
 
-    def test_project_up_recreates_containers(self):
+    def test_recreate_preserves_volumes(self):
         web = self.create_service('web')
         db = self.create_service('db', volumes=['/etc'])
         project = Project('composetest', [web, db], self.client)
@@ -209,7 +209,7 @@ class ProjectTest(DockerClientTestCase):
         old_db_id = project.containers()[0].id
         db_volume_path = project.containers()[0].get('Volumes./etc')
 
-        project.up()
+        project.up(force_recreate=True)
         self.assertEqual(len(project.containers()), 2)
 
         db_container = [c for c in project.containers() if 'db' in c.name][0]
@@ -17,14 +17,14 @@ class ResilienceTest(DockerClientTestCase):
         self.host_path = container.get('Volumes')['/var/db']
 
     def test_successful_recreate(self):
-        self.project.up()
+        self.project.up(force_recreate=True)
         container = self.db.containers()[0]
         self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)
 
     def test_create_failure(self):
         with mock.patch('compose.service.Service.create_container', crash):
             with self.assertRaises(Crash):
-                self.project.up()
+                self.project.up(force_recreate=True)
 
         self.project.up()
         container = self.db.containers()[0]
@@ -33,7 +33,7 @@ class ResilienceTest(DockerClientTestCase):
     def test_start_failure(self):
         with mock.patch('compose.service.Service.start_container', crash):
             with self.assertRaises(Crash):
-                self.project.up()
+                self.project.up(force_recreate=True)
 
         self.project.up()
         container = self.db.containers()[0]
@@ -12,7 +12,6 @@ from .testcases import DockerClientTestCase
 
 class ProjectTestCase(DockerClientTestCase):
     def run_up(self, cfg, **kwargs):
-        kwargs.setdefault('smart_recreate', True)
         kwargs.setdefault('timeout', 1)
 
         project = self.make_project(cfg)
@@ -155,7 +154,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
 
 def converge(service,
              allow_recreate=True,
-             smart_recreate=False,
+             force_recreate=False,
              insecure_registry=False,
              do_build=True):
     """
@@ -164,7 +163,7 @@ def converge(service,
     """
     plan = service.convergence_plan(
         allow_recreate=allow_recreate,
-        smart_recreate=smart_recreate,
+        force_recreate=force_recreate,
    )
 
    return service.execute_convergence_plan(
@@ -180,7 +179,7 @@ class ServiceStateTest(DockerClientTestCase):
 
     def test_trigger_create(self):
         web = self.create_service('web')
-        self.assertEqual(('create', []), web.convergence_plan(smart_recreate=True))
+        self.assertEqual(('create', []), web.convergence_plan())
 
     def test_trigger_noop(self):
         web = self.create_service('web')
@@ -188,7 +187,7 @@ class ServiceStateTest(DockerClientTestCase):
         web.start()
 
         web = self.create_service('web')
-        self.assertEqual(('noop', [container]), web.convergence_plan(smart_recreate=True))
+        self.assertEqual(('noop', [container]), web.convergence_plan())
 
     def test_trigger_start(self):
         options = dict(command=["top"])
@@ -205,7 +204,7 @@ class ServiceStateTest(DockerClientTestCase):
         web = self.create_service('web', **options)
         self.assertEqual(
             ('start', containers[0:1]),
-            web.convergence_plan(smart_recreate=True),
+            web.convergence_plan(),
         )
 
     def test_trigger_recreate_with_config_change(self):
@@ -213,14 +212,14 @@ class ServiceStateTest(DockerClientTestCase):
         container = web.create_container()
 
         web = self.create_service('web', command=["top", "-d", "1"])
-        self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
+        self.assertEqual(('recreate', [container]), web.convergence_plan())
 
     def test_trigger_recreate_with_nonexistent_image_tag(self):
         web = self.create_service('web', image="busybox:latest")
         container = web.create_container()
 
         web = self.create_service('web', image="nonexistent-image")
-        self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
+        self.assertEqual(('recreate', [container]), web.convergence_plan())
 
     def test_trigger_recreate_with_image_change(self):
         repo = 'composetest_myimage'
@@ -240,7 +239,7 @@ class ServiceStateTest(DockerClientTestCase):
                 self.client.remove_container(c)
 
             web = self.create_service('web', image=image)
-            self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
+            self.assertEqual(('recreate', [container]), web.convergence_plan())
 
         finally:
             self.client.remove_image(image)
@@ -263,7 +262,7 @@ class ServiceStateTest(DockerClientTestCase):
             web.build()
 
             web = self.create_service('web', build=context)
-            self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
+            self.assertEqual(('recreate', [container]), web.convergence_plan())
         finally:
             shutil.rmtree(context)