Refactor parallel execute

Refactored parallel_execute and parallel_create_execute into a single function,
parallel_execute, that can now handle both cases. This helps untangle it
from being so tightly coupled to the container.

Updated all the relevant operations to use the refactored function.

Signed-off-by: Mazz Mosley <mazz@houseofmnowster.com>
This commit is contained in:
Mazz Mosley 2015-07-21 11:56:59 +01:00
parent 5c29ded6ac
commit da650e9cfd
3 changed files with 72 additions and 87 deletions

View File

@ -198,15 +198,30 @@ class Project(object):
service.start(**options)
def stop(self, service_names=None, **options):
    """
    Stop the running containers for the named services (all services
    when service_names is None), in parallel.

    Extra keyword options are forwarded to Container.stop().
    """
    parallel_execute(
        objects=self.containers(service_names),
        obj_callable=lambda c: c.stop(**options),
        msg_index=lambda c: c.name,
        msg="Stopping"
    )
def kill(self, service_names=None, **options):
    """
    Kill the running containers for the named services (all services
    when service_names is None), in parallel.

    Extra keyword options are forwarded to Container.kill().
    """
    parallel_execute(
        objects=self.containers(service_names),
        obj_callable=lambda c: c.kill(**options),
        msg_index=lambda c: c.name,
        msg="Killing"
    )
def remove_stopped(self, service_names=None, **options):
    """
    Remove the stopped containers for the named services (all services
    when service_names is None), in parallel.

    Running containers are deliberately left untouched; only containers
    whose is_running flag is false are removed. Extra keyword options are
    forwarded to Container.remove().
    """
    all_containers = self.containers(service_names, stopped=True)
    stopped_containers = [c for c in all_containers if not c.is_running]
    parallel_execute(
        objects=stopped_containers,
        obj_callable=lambda c: c.remove(**options),
        msg_index=lambda c: c.name,
        msg="Removing"
    )
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):

View File

@ -24,7 +24,7 @@ from .const import (
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash, parallel_create_execute, parallel_execute
from .utils import json_hash, parallel_execute
log = logging.getLogger(__name__)
@ -162,13 +162,11 @@ class Service(object):
'for this service are created on a single host, the port will clash.'
% self.name)
def create_and_start(service, number):
    """
    Create container number *number* for *service*, start it, and return
    the new container.

    Takes the service explicitly (rather than closing over self) so the
    helper can be handed to parallel_execute as a plain callable.
    """
    # quiet=True: parallel_execute handles the progress output itself.
    container = service.create_container(number=number, quiet=True)
    container.start()
    return container
msgs = {'doing': 'Creating', 'done': 'Started'}
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
@ -185,20 +183,31 @@ class Service(object):
next_number, next_number + num_to_create
)
]
parallel_create_execute(create_and_start, container_numbers, msgs)
parallel_execute(
objects=container_numbers,
obj_callable=lambda n: create_and_start(service=self, number=n),
msg_index=lambda n: n,
msg="Creating and starting"
)
if desired_num < num_running:
num_to_stop = num_running - desired_num
sorted_running_containers = sorted(running_containers, key=attrgetter('number'))
containers_to_stop = sorted_running_containers[-num_to_stop:]
if desired_num < num_running:
# count number of running containers.
num_to_stop = num_running - desired_num
containers_to_stop = sorted_running_containers[-num_to_stop:]
# TODO: refactor these out?
parallel_execute("stop", containers_to_stop, "Stopping", "Stopped")
parallel_execute("remove", containers_to_stop, "Removing", "Removed")
# self.remove_stopped()
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.stop(timeout=timeout),
msg_index=lambda c: c.name,
msg="Stopping"
)
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.remove(),
msg_index=lambda c: c.name,
msg="Removing"
)
def remove_stopped(self, **options):
for c in self.containers(stopped=True):

View File

@ -12,114 +12,75 @@ from threading import Thread
log = logging.getLogger(__name__)
def parallel_execute(objects, obj_callable, msg_index, msg):
    """
    Call obj_callable once per object in *objects*, each call on its own
    daemon thread, and stream per-object progress messages to stdout.

    objects      -- iterable of work items (containers, numbers, ...)
    obj_callable -- one-argument callable invoked with each object
    msg_index    -- one-argument callable mapping an object to the label
                    used in the progress display (e.g. a container name)
    msg          -- verb shown next to each label (e.g. "Stopping")

    API errors from the Docker daemon are collected and reported after
    all work has finished rather than aborting the whole batch.
    """
    stream = codecs.getwriter('utf-8')(sys.stdout)
    lines = []
    errors = {}

    # Print an initial "in progress" line for every object; write_out_msg
    # later rewrites these lines in place as results come back.
    for obj in objects:
        write_out_msg(stream, lines, msg_index(obj), msg)

    q = Queue()

    def inner_execute_function(an_callable, parameter, msg_index):
        try:
            result = an_callable(parameter)
        except APIError as e:
            # Record the daemon's explanation; the "error" sentinel tells
            # the consumer loop to mark this line as failed.
            errors[msg_index] = e.explanation
            result = "error"
        q.put((msg_index, result))

    for an_object in objects:
        t = Thread(
            target=inner_execute_function,
            args=(obj_callable, an_object, msg_index(an_object)),
        )
        # Daemon threads so a hung operation cannot block interpreter exit.
        t.daemon = True
        t.start()

    done = 0
    total_to_execute = len(objects)

    # Poll with a short timeout so the loop stays responsive (and
    # interruptible) while workers are still running.
    while done < total_to_execute:
        try:
            msg_index, result = q.get(timeout=1)
            if result == 'error':
                write_out_msg(stream, lines, msg_index, msg, status='error')
            else:
                write_out_msg(stream, lines, msg_index, msg)
            done += 1
        except Empty:
            pass

    if errors:
        for error in errors:
            stream.write("ERROR: for {} {} \n".format(error, errors[error]))
def parallel_execute(command, containers, doing_msg, done_msg, **options):
    """
    Run *command* (a Container method name) on every container in
    *containers* concurrently, writing per-container progress to stdout.
    """
    stream = codecs.getwriter('utf-8')(sys.stdout)
    lines = []
    errors = {}

    # Emit an initial progress line per container; these get rewritten
    # in place once each operation completes.
    for c in containers:
        write_out_msg(stream, lines, c.name, doing_msg)

    q = Queue()

    def run_command(c, cmd, **opts):
        try:
            getattr(c, cmd)(**opts)
        except APIError as err:
            errors[c.name] = err.explanation
        q.put(c)

    for c in containers:
        worker = Thread(target=run_command, args=(c, command), kwargs=options)
        worker.daemon = True
        worker.start()

    remaining = len(containers)
    while remaining:
        try:
            finished = q.get(timeout=1)
        except Empty:
            continue
        write_out_msg(stream, lines, finished.name, done_msg)
        remaining -= 1

    for name in errors:
        stream.write("ERROR: for {} {} \n".format(name, errors[name]))
def write_out_msg(stream, lines, msg_index, msg, status="done"):
    """
    Write a progress line of the form "<msg> <msg_index>... <status>".

    *lines* is the ordered list of labels already printed. If msg_index
    was printed before, ANSI escape sequences move the cursor up to its
    line, erase it, rewrite it with the final status, and move back down.
    Otherwise a fresh "in progress" line is appended and the label is
    recorded in *lines*.

    stream    -- writable text stream (flushed before returning)
    lines     -- mutable list of labels, in display order (updated in place)
    msg_index -- label identifying the object (e.g. container name)
    msg       -- verb shown in the message (e.g. "Stopping")
    status    -- final status word; only shown when rewriting a line
    """
    obj_index = msg_index
    if msg_index in lines:
        position = lines.index(obj_index)
        diff = len(lines) - position
        # move up
        stream.write("%c[%dA" % (27, diff))
        # erase
        stream.write("%c[2K\r" % 27)
        stream.write("{} {}... {}\n".format(msg, obj_index, status))
        # move back down
        stream.write("%c[%dB" % (27, diff))
    else:
        diff = 0
        lines.append(obj_index)
        stream.write("{} {}... \r\n".format(msg, obj_index))
    stream.flush()