Mirror of https://github.com/ansible-collections/community.general.git, synced 2024-09-14 20:13:21 +02:00
Fix merge conflicts from #5519

Commit 61d283e2ad
3 changed files with 31 additions and 15 deletions
@@ -296,17 +296,17 @@ class PlayBook(object):

     # *****************************************************

-    def _list_available_hosts(self, *args):
+    def _trim_unavailable_hosts(self, hostlist=[]):
         ''' returns a list of hosts that haven't failed and aren't dark '''

-        return [ h for h in self.inventory.list_hosts(*args) if (h not in self.stats.failures) and (h not in self.stats.dark)]
+        return [ h for h in hostlist if (h not in self.stats.failures) and (h not in self.stats.dark)]

     # *****************************************************

     def _run_task_internal(self, task):
         ''' run a particular module step in a playbook '''

-        hosts = self._list_available_hosts()
+        hosts = self._trim_unavailable_hosts(task.play._play_hosts)
         self.inventory.restrict_to(hosts)

         runner = ansible.runner.Runner(
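Note: for quick reference, here is a standalone sketch of the filtering idiom behind the renamed helper above. Only the failures/dark attributes and the list comprehension come from the diff; the Stats stand-in and the host names are hypothetical.

# Standalone sketch (not the real PlayBook): drop hosts that have already
# failed or gone "dark" (unreachable) from a pre-computed host list.
class Stats(object):
    def __init__(self):
        self.failures = {}   # host -> count of failed tasks (hypothetical shape)
        self.dark = {}       # host -> count of unreachable attempts

def trim_unavailable_hosts(stats, hostlist=[]):
    return [h for h in hostlist if (h not in stats.failures) and (h not in stats.dark)]

stats = Stats()
stats.failures['web2'] = 1
print(trim_unavailable_hosts(stats, ['web1', 'web2', 'db1']))   # ['web1', 'db1']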
@@ -342,7 +342,8 @@ class PlayBook(object):
             error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
             su=task.su,
             su_user=task.su_user,
-            su_pass=task.su_pass
+            su_pass=task.su_pass,
+            run_hosts=hosts
         )

         if task.async_seconds == 0:
@@ -462,7 +463,7 @@ class PlayBook(object):
         if play.gather_facts is False:
             return {}

-        host_list = self._list_available_hosts(play.hosts)
+        host_list = self._trim_unavailable_hosts(play._play_hosts)

         self.callbacks.on_setup()
         self.inventory.restrict_to(host_list)
@@ -542,8 +543,10 @@ class PlayBook(object):
         ''' run a list of tasks for a given pattern, in order '''

         self.callbacks.on_play_start(play.name)
+        # Get the hosts for this play
+        play._play_hosts = self.inventory.list_hosts(play.hosts)
         # if no hosts matches this play, drop out
-        if not self.inventory.list_hosts(play.hosts):
+        if not play._play_hosts:
             self.callbacks.on_no_hosts_matched()
             return True

@@ -552,8 +555,9 @@ class PlayBook(object):

         # now with that data, handle contentional variable file imports!

-        all_hosts = self._list_available_hosts(play.hosts)
+        all_hosts = self._trim_unavailable_hosts(play._play_hosts)
         play.update_vars_files(all_hosts)
+        hosts_count = len(all_hosts)

         serialized_batch = []
         if play.serial <= 0:
@@ -569,6 +573,9 @@ class PlayBook(object):

         for on_hosts in serialized_batch:

+            # restrict the play to just the hosts we have in our on_hosts block that are
+            # available.
+            play._play_hosts = self._trim_unavailable_hosts(on_hosts)
             self.inventory.also_restrict_to(on_hosts)

             for task in play.tasks():
@@ -591,7 +598,7 @@ class PlayBook(object):
                     # prevent duplicate handler includes from running more than once
                     fired_names[handler_name] = 1

-                    host_list = self._list_available_hosts(play.hosts)
+                    host_list = self._trim_unavailable_hosts(play._play_hosts)
                     if handler.any_errors_fatal and len(host_list) < hosts_count:
                         play.max_fail_pct = 0
                     if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
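Note: the any_errors_fatal / max_fail_pct checks above abort the play once the number of lost hosts exceeds a percentage of the play's original host count. A worked example of that arithmetic, with made-up numbers:

# Worked example of the failure-percentage check above (values are illustrative):
hosts_count = 10                                        # hosts the play started with
host_list = ['h%d' % i for i in range(7)]               # 7 hosts still available, 3 lost
max_fail_pct = 20                                       # abort once more than 20% are lost

lost = hosts_count - len(host_list)                     # 3
threshold = int((max_fail_pct) / 100.0 * hosts_count)   # int(2.0) == 2
print(lost > threshold)                                 # True -> the play bails out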
@@ -610,8 +617,6 @@ class PlayBook(object):

                     continue

-                hosts_count = len(self._list_available_hosts(play.hosts))
-
                 # only run the task if the requested tags match
                 should_run = False
                 for x in self.only_tags:
@@ -634,7 +639,9 @@ class PlayBook(object):
                     # just didn't match anything and that's ok
                     return False

-                host_list = self._list_available_hosts(play.hosts)
+                # Get a new list of what hosts are left as available, the ones that
+                # did not go fail/dark during the task
+                host_list = self._trim_unavailable_hosts(play._play_hosts)

                 # Set max_fail_pct to 0, So if any hosts fails, bail out
                 if task.any_errors_fatal and len(host_list) < hosts_count:
@@ -34,7 +34,7 @@ class Play(object):
        'handlers', 'remote_user', 'remote_port', 'included_roles', 'accelerate',
        'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'transport', 'playbook',
        'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks',
-       'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct', 'su', 'su_user'
+       'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct', '_play_hosts', 'su', 'su_user'
     ]

     # to catch typos and so forth -- these are userland names
@@ -137,6 +137,8 @@ class Play(object):
         if self.sudo_user != 'root':
             self.sudo = True

+        # place holder for the discovered hosts to be used in this play
+        self._play_hosts = None

         # *************************************************

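Note: a rough sketch of the _play_hosts lifecycle these hunks introduce, under the assumption that serial <= 0 so the whole host list forms a single batch; MiniPlay and run_play are simplified stand-ins, not the real classes.

# Simplified stand-ins (not the real Play/PlayBook): _play_hosts is discovered
# once at play start, then re-trimmed per batch so failed/dark hosts drop out.
class MiniPlay(object):
    def __init__(self, hosts):
        self.hosts = hosts           # host pattern from the playbook
        self._play_hosts = None      # place holder for the discovered hosts

def run_play(play, list_hosts, trim):
    play._play_hosts = list_hosts(play.hosts)      # discover hosts for this play
    if not play._play_hosts:
        return True                                # no hosts matched, drop out
    for on_hosts in [play._play_hosts]:            # a single batch when serial <= 0
        play._play_hosts = trim(on_hosts)          # keep only non-failed, non-dark hosts
    return True

play = MiniPlay('webservers')
run_play(play, lambda pattern: ['web1', 'web2'],
         lambda hosts: [h for h in hosts if h != 'web2'])
print(play._play_hosts)   # ['web1']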
@@ -143,7 +143,8 @@ class Runner(object):
         accelerate_port=None,       # port to use with accelerated connection
         su=False,                   # Are we running our command via su?
         su_user=None,               # User to su to when running command, ex: 'root'
-        su_pass=C.DEFAULT_SU_PASS
+        su_pass=C.DEFAULT_SU_PASS,
+        run_hosts=None,             # an optional list of pre-calculated hosts to run on
         ):

         # used to lock multiprocess inputs and outputs at various levels
@@ -211,6 +212,10 @@ class Runner(object):
             # don't override subset when passed from playbook
             self.inventory.subset(subset)

+        # If we get a pre-built list of hosts to run on, from say a playbook, use them.
+        # Also where we will store the hosts to run on once discovered
+        self.run_hosts = run_hosts
+
         if self.transport == 'local':
             self.remote_user = pwd.getpwuid(os.geteuid())[0]

@@ -1034,7 +1039,7 @@ class Runner(object):
                 results2["dark"][host] = result.result

         # hosts which were contacted but never got a chance to return
-        for host in self.inventory.list_hosts(self.pattern):
+        for host in self.run_hosts:
             if not (host in results2['dark'] or host in results2['contacted']):
                 results2["dark"][host] = {}
         return results2
@@ -1045,7 +1050,9 @@ class Runner(object):
         ''' xfer & run module on all matched hosts '''

         # find hosts that match the pattern
-        hosts = self.inventory.list_hosts(self.pattern)
+        if not self.run_hosts:
+            self.run_hosts = self.inventory.list_hosts(self.pattern)
+        hosts = self.run_hosts
         if len(hosts) == 0:
             self.callbacks.on_no_hosts()
             return dict(contacted={}, dark={})
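Note: taken together, the Runner hunks add an optional pre-calculated host list with a fallback to pattern matching. A minimal sketch of that behaviour with stub classes (StubInventory and MiniRunner are illustrative stand-ins, not the real ansible classes):

# Illustrative stand-ins: if a caller such as the playbook engine passes
# run_hosts, it is used as-is; otherwise the pattern is resolved on first run.
class StubInventory(object):
    def __init__(self, hosts):
        self._hosts = list(hosts)
    def list_hosts(self, pattern):
        return list(self._hosts)          # a real inventory would match the pattern

class MiniRunner(object):
    def __init__(self, inventory, pattern='all', run_hosts=None):
        self.inventory = inventory
        self.pattern = pattern
        self.run_hosts = run_hosts        # optional pre-calculated hosts, as in the diff

    def run(self):
        if not self.run_hosts:
            self.run_hosts = self.inventory.list_hosts(self.pattern)
        hosts = self.run_hosts
        if len(hosts) == 0:
            return dict(contacted={}, dark={})
        return dict(contacted=dict((h, {}) for h in hosts), dark={})

inv = StubInventory(['web1', 'db1'])
print(MiniRunner(inv).run())                        # resolves the pattern itself
print(MiniRunner(inv, run_hosts=['web1']).run())    # pre-calculated list wins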