mirror of
https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00
Adds the 'serial' keyword to a playbook which controls how many hosts can be running through a playbook at a single time.
The default is 0, which means all hosts. If set to 1, each host would run a playbook all the way through before moving on to the next host. Fact gathering is still parallel, regardless of the serial setting.
This commit is contained in:
parent
e13c33bb86
commit
898d7676f7
4 changed files with 62 additions and 23 deletions
|
@ -40,6 +40,7 @@ Ansible Changes By Release
|
|||
* add pattern= as a parameter to the service module
|
||||
* various fixes to mysql & postgresql modules
|
||||
* adds 'delegate_to' for a task, which can be used to signal outage windows and load balancers on behalf of hosts
|
||||
* adds 'serial' to playbook, allowing you to specify how many hosts can be processing a playbook at one time (default 0=all)
|
||||
|
||||
0.6 "Cabo" -- August 6, 2012
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ class Inventory(object):
|
|||
Host inventory for ansible.
|
||||
"""
|
||||
|
||||
__slots__ = [ 'host_list', 'groups', '_restriction', '_subset', '_is_script',
|
||||
__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', '_is_script',
|
||||
'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache' ]
|
||||
|
||||
def __init__(self, host_list=C.DEFAULT_HOST_LIST):
|
||||
|
@ -55,6 +55,7 @@ class Inventory(object):
|
|||
|
||||
# a list of host(names) to contain current inquiries to
|
||||
self._restriction = None
|
||||
self._also_restriction = None
|
||||
self._subset = None
|
||||
|
||||
# whether the inventory file is a script
|
||||
|
@ -122,6 +123,8 @@ class Inventory(object):
|
|||
# exclude hosts mentioned in any restriction (ex: failed hosts)
|
||||
if self._restriction is not None:
|
||||
hosts = [ h for h in hosts if h.name in self._restriction ]
|
||||
if self._also_restriction is not None:
|
||||
hosts = [ h for h in hosts if h.name in self._also_restriction ]
|
||||
|
||||
return sorted(hosts, key=lambda x: x.name)
|
||||
|
||||
|
@ -281,6 +284,7 @@ class Inventory(object):
|
|||
def list_groups(self):
|
||||
return sorted([ g.name for g in self.groups ], key=lambda x: x.name)
|
||||
|
||||
# TODO: remove this function
|
||||
def get_restriction(self):
|
||||
return self._restriction
|
||||
|
||||
|
@ -294,6 +298,15 @@ class Inventory(object):
|
|||
restriction = [ restriction ]
|
||||
self._restriction = restriction
|
||||
|
||||
def also_restrict_to(self, restriction):
|
||||
"""
|
||||
Works like restict_to but offers an additional restriction. Playbooks use this
|
||||
to implement serial behavior.
|
||||
"""
|
||||
if type(restriction) != list:
|
||||
restriction = [ restriction ]
|
||||
self._also_restriction = restriction
|
||||
|
||||
def subset(self, subset_pattern):
|
||||
"""
|
||||
Limits inventory results to a subset of inventory that matches a given
|
||||
|
@ -308,9 +321,12 @@ class Inventory(object):
|
|||
|
||||
def lift_restriction(self):
|
||||
""" Do not restrict list operations """
|
||||
|
||||
self._restriction = None
|
||||
|
||||
def lift_also_restriction(self):
|
||||
""" Clears the also restriction """
|
||||
self._also_restriction = None
|
||||
|
||||
def is_file(self):
|
||||
""" did inventory come from a file? """
|
||||
if not isinstance(self.host_list, basestring):
|
||||
|
|
|
@ -150,8 +150,8 @@ class PlayBook(object):
|
|||
# loop through all patterns and run them
|
||||
self.callbacks.on_start()
|
||||
for play_ds in self.playbook:
|
||||
self._run_play(Play(self,play_ds))
|
||||
|
||||
play = Play(self,play_ds)
|
||||
self._run_play(play)
|
||||
# summarize the results
|
||||
results = {}
|
||||
for host in self.stats.processed.keys():
|
||||
|
@ -301,7 +301,25 @@ class PlayBook(object):
|
|||
self._do_setup_step(play)
|
||||
|
||||
# now with that data, handle contentional variable file imports!
|
||||
play.update_vars_files(self.inventory.list_hosts(play.hosts))
|
||||
|
||||
all_hosts = self.inventory.list_hosts(play.hosts)
|
||||
play.update_vars_files(all_hosts)
|
||||
|
||||
serialized_batch = []
|
||||
if play.serial <= 0:
|
||||
serialized_batch = [all_hosts]
|
||||
else:
|
||||
# do N forks all the way through before moving to next
|
||||
while len(all_hosts) > 0:
|
||||
play_hosts = []
|
||||
for x in range(1, play.serial):
|
||||
if len(all_hosts) > 0:
|
||||
play_hosts.append(all_hosts.pop())
|
||||
serialized_batch.append(play_hosts)
|
||||
|
||||
for on_hosts in serialized_batch:
|
||||
|
||||
self.inventory.also_restrict_to(on_hosts)
|
||||
|
||||
for task in play.tasks():
|
||||
# only run the task if the requested tags match
|
||||
|
@ -320,3 +338,6 @@ class PlayBook(object):
|
|||
self.inventory.restrict_to(handler.notified_by)
|
||||
self._run_task(play, handler, True)
|
||||
self.inventory.lift_restriction()
|
||||
|
||||
self.inventory.lift_also_restriction()
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ class Play(object):
|
|||
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
|
||||
'handlers', 'remote_user', 'remote_port',
|
||||
'sudo', 'sudo_user', 'transport', 'playbook',
|
||||
'tags', 'gather_facts', '_ds', '_handlers', '_tasks'
|
||||
'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks'
|
||||
]
|
||||
|
||||
# to catch typos and so forth -- these are userland names
|
||||
|
@ -37,7 +37,7 @@ class Play(object):
|
|||
VALID_KEYS = [
|
||||
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
|
||||
'tasks', 'handlers', 'user', 'port', 'include',
|
||||
'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts'
|
||||
'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial'
|
||||
]
|
||||
|
||||
# *************************************************
|
||||
|
@ -75,6 +75,7 @@ class Play(object):
|
|||
self.transport = ds.get('connection', self.playbook.transport)
|
||||
self.tags = ds.get('tags', None)
|
||||
self.gather_facts = ds.get('gather_facts', True)
|
||||
self.serial = ds.get('serial', 0)
|
||||
|
||||
self._update_vars_files_for_host(None)
|
||||
|
||||
|
|
Loading…
Reference in a new issue