Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)
Code cleanup for playbooks, also added 'on_skipped' callback
This commit is contained in: parent 24964b9660, commit 6ab615c724
5 changed files with 159 additions and 163 deletions
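The headline change is the new on_skipped callback: PlaybookCallbacks gains an on_skipped(host) hook, PlayBook fires it (via the new _compute_aggregrate_counts helper) for any contacted host whose result contains a 'skipped' key, and the test callbacks and fixtures are extended to cover it. As a rough sketch of how a caller might hook into it (the module path and the PlayBook constructor arguments other than callbacks are assumptions based on the signature visible in this diff, not something this commit defines):

    # Hypothetical consumer of the new hook; only the on_skipped(host)
    # signature comes from this commit, the rest is an assumption.
    import ansible.playbook

    class RecordingCallbacks(object):
        def __init__(self):
            self.skipped_hosts = []
        def on_skipped(self, host):
            # fired once per contacted host whose result contains 'skipped'
            self.skipped_hosts.append(host)
        # a real callbacks object also needs on_ok, on_failed, on_dark_host,
        # on_async_poll, etc., as seen in PlaybookCallbacks/TestCallbacks below

    pb = ansible.playbook.PlayBook(playbook='site.yml', callbacks=RecordingCallbacks())
    pb.run()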
@@ -60,6 +60,9 @@ class PlaybookCallbacks(object):
         else:
             print "ok: [%s] => %s\n" % (host, invocation)
 
+    def on_skipped(self, host):
+        print "skipping: [%s]\n" % host
+
     def on_import_for_host(self, host, imported_file):
         print "%s: importing %s" % (host, imported_file)
 
@@ -38,11 +38,14 @@ class PlayBook(object):
     management, or automation based set of commands to
     run in series.
 
-    multiple patterns do not execute simultaneously,
+    multiple plays/tasks do not execute simultaneously,
     but tasks in each pattern do execute in parallel
-    according to the number of forks requested.
+    (according to the number of forks requested) among
+    the hosts they address
     '''
 
+    # *****************************************************
+
     def __init__(self,
         playbook =None,
         host_list =C.DEFAULT_HOST_LIST,
@@ -54,11 +57,6 @@ class PlayBook(object):
         verbose=False,
         callbacks=None):
 
-        # TODO, once ansible-playbook is it's own script this will
-        # have much LESS parameters to the constructor and will
-        # read most everything per pattern from the playbook
-        # and this will be greatly simplified
-
         self.host_list = host_list
         self.module_path = module_path
         self.forks = forks
@@ -72,12 +70,12 @@ class PlayBook(object):
         # store the list of changes/invocations/failure counts
         # as a dictionary of integers keyed off the hostname
 
-        self.processed = {}
         self.dark = {}
         self.changed = {}
         self.invocations = {}
         self.failures = {}
         self.skipped = {}
+        self.processed = {}
 
         # playbook file can be passed in as a path or
         # as file contents (to support API usage)
@@ -86,16 +84,22 @@ class PlayBook(object):
         self.playbook = self._parse_playbook(playbook)
 
         self.host_list, self.groups = ansible.runner.Runner.parse_hosts(host_list)
 
+    # *****************************************************
+
     def _get_vars(self, play, dirname):
+        ''' load the vars section from a play '''
+
         vars = play.get('vars', {})
         if type(vars) != dict:
             raise errors.AnsibleError("'vars' section must contain only key/value pairs")
         return vars
 
+    # *****************************************************
+
     def _include_tasks(self, play, task, dirname, new_tasks):
-        # an include line looks like:
+        ''' load tasks included from external files. '''
 
         # include: some.yml a=2 b=3 c=4
         include_tokens = task['include'].split()
         path = utils.path_dwim(dirname, include_tokens[0])
@@ -109,7 +113,11 @@ class PlayBook(object):
         for x in included:
             new_tasks.append(x)
 
+    # *****************************************************
+
     def _include_handlers(self, play, handler, dirname, new_handlers):
+        ''' load handlers from external files '''
+
         path = utils.path_dwim(dirname, handler['include'])
         inject_vars = self._get_vars(play, dirname)
         included = utils.template_from_file(path, inject_vars)
@@ -117,6 +125,8 @@ class PlayBook(object):
         for x in included:
             new_handlers.append(x)
 
+    # *****************************************************
+
     def _parse_playbook(self, playbook):
         ''' load YAML file, including handling for imported files '''
 
@@ -146,6 +156,8 @@ class PlayBook(object):
             play['handlers'] = new_handlers
 
         return playbook
 
+    # *****************************************************
+
     def run(self):
         ''' run all patterns in the playbook '''
@@ -158,55 +170,74 @@ class PlayBook(object):
         # summarize the results
         results = {}
         for host in self.processed.keys():
-            results[host] = {
-                'resources' : self.invocations.get(host, 0),
-                'changed' : self.changed.get(host, 0),
-                'dark' : self.dark.get(host, 0),
-                'failed' : self.failures.get(host, 0),
-                'skipped' : self.skipped.get(host, 0)
-            }
+            results[host] = dict(
+                resources = self.invocations.get(host, 0),
+                changed = self.changed.get(host, 0),
+                dark = self.dark.get(host, 0),
+                failed = self.failures.get(host, 0),
+                skipped = self.skipped.get(host, 0)
+            )
 
-        # FIXME: TODO: use callback to reinstate per-host summary
-        # and add corresponding code in /bin/ansible-playbook
         return results
 
+    # *****************************************************
+
     def _prune_failed_hosts(self, host_list):
+        ''' given a host list, use the global failure information to trim the list '''
+
         new_hosts = []
         for x in host_list:
             if not x in self.failures and not x in self.dark:
                 new_hosts.append(x)
         return new_hosts
 
+    # *****************************************************
+
     def hosts_to_poll(self, results):
         ''' which hosts need more polling? '''
 
         hosts = []
         for (host, res) in results['contacted'].iteritems():
-            # FIXME: make polling pattern in /bin/ansible match
-            # move to common function in utils
            if not 'finished' in res and not 'skipped' in res and 'started' in res:
                 hosts.append(host)
         return hosts
 
+    # ****************************************************
+
+    def _compute_aggregrate_counts(self, results, poll=False, setup=False):
+        ''' prints results about playbook run + computes stats about per host changes '''
+
+        dark_hosts = results.get('dark',{})
+        contacted_hosts = results.get('contacted',{})
+        for (host, error) in dark_hosts.iteritems():
+            self.processed[host] = 1
+            self.callbacks.on_dark_host(host, error)
+            self.dark[host] = 1
+        for (host, host_result) in contacted_hosts.iteritems():
+            self.processed[host] = 1
+            if 'failed' in host_result:
+                self.callbacks.on_failed(host, host_result)
+                self.failures[host] = 1
+            elif 'skipped' in host_result:
+                self.skipped[host] = self.skipped.get(host, 0) + 1
+                self.callbacks.on_skipped(host)
+            if poll:
+                continue
+            if not setup and ('changed' in host_result):
+                self.invocations[host] = self.invocations.get(host, 0) + 1
+                self.changed[host] = self.changed.get(host, 0) + 1
+                self.callbacks.on_ok(host, host_result)
+            else:
+                self.invocations[host] = self.invocations.get(host, 0) + 1
+                self.callbacks.on_ok(host, host_result)
 
+    # *****************************************************
+
     def _async_poll(self, runner, async_seconds, async_poll_interval, only_if):
         ''' launch an async job, if poll_interval is set, wait for completion '''
 
-        # TODO: refactor this function
         runner.background = async_seconds
         results = runner.run()
-        # TODO -- this block is repeated in lots of places, refactor
-        dark_hosts = results.get('dark',{})
-        contacted_hosts = results.get('contacted',{})
-        for (host, error) in dark_hosts.iteritems():
-            self.callbacks.on_dark_host(host, error)
-            self.dark[host] = 1
-        for (host, host_result) in contacted_hosts.iteritems():
-            if 'failed' in host_result:
-                self.callbacks.on_failed(host, host_result)
-                self.failures[host] = 1
-            if 'skipped' in host_result:
-                self.skipped[host] = self.skipped.get(host, 0) + 1
+        self._compute_aggregrate_counts(results, poll=True)
 
         if async_poll_interval <= 0:
             # if not polling, playbook requested fire and forget
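The new _compute_aggregrate_counts helper above replaces the near-identical result-walking blocks that were previously copied into _async_poll, _run_task, and _do_setup_step. The per-host bookkeeping it centralizes is the plain dict.get-with-default idiom; a standalone illustration follows (result values are invented, the 'dark'/'contacted' layout mirrors what Runner.run() returns in this revision):

    # Illustration only: tally per-host outcomes the way the helper does.
    results = {
        'dark'      : { 'web2' : 'SSH problem' },
        'contacted' : { 'web1' : { 'changed' : True }, 'web3' : { 'skipped' : True } },
    }
    dark, changed, skipped = {}, {}, {}
    for host in results.get('dark', {}):
        dark[host] = dark.get(host, 0) + 1
    for host, res in results.get('contacted', {}).items():
        if 'skipped' in res:
            skipped[host] = skipped.get(host, 0) + 1
        elif 'changed' in res:
            changed[host] = changed.get(host, 0) + 1
    # dark == {'web2': 1}, skipped == {'web3': 1}, changed == {'web1': 1}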
@@ -218,10 +249,11 @@ class PlayBook(object):
             # no hosts launched ok, return that.
             return results
         ahost = poll_hosts[0]
 
         jid = results['contacted'][ahost].get('ansible_job_id', None)
 
         if jid is None:
-            # FIXME this really shouldn't happen. consider marking hosts failed
-            # and looking for jid in other host.
+            # note: this really shouldn't happen, ever
             self.callbacks.on_async_confused("unexpected error: unable to determine jid")
             return results
@@ -231,13 +263,13 @@ class PlayBook(object):
 
         poll_results = results
         while (clock >= 0):
 
+            # poll/loop until polling duration complete
+            # FIXME: make a "get_async_runner" method like in /bin/ansible
             runner.hosts = poll_hosts
-            # FIXME: make a "get_async_runner" method like in /bin/ansible
-            # loop until polling duration complete
             runner.module_args = [ "jid=%s" % jid ]
             runner.module_name = 'async_status'
-            # FIXME: make it such that if you say 'async_status' you
-            # can't background that op!
+            # FIXME: make it such that if you say 'async_status' you # can't background that op!
             runner.background = 0
             runner.pattern = '*'
             runner.hosts = self.hosts_to_poll(poll_results)
@@ -248,56 +280,38 @@ class PlayBook(object):
             if poll_results is None:
                 break
 
-            # TODO -- this block is repeated in lots of places, refactor
-            dark_hosts = poll_results.get('dark',{})
-            contacted_hosts = poll_results.get('contacted',{})
-            for (host, error) in dark_hosts.iteritems():
-                self.callbacks.on_dark_host(host, error)
-                self.dark[host] = 1
-            for (host, host_result) in contacted_hosts.iteritems():
-                if 'failed' in host_result:
-                    self.callbacks.on_failed(host, host_result)
-                    self.failures[host] = 1
-                if 'skipped' in host_result:
-                    # NOTE: callbacks on skipped? should not really
-                    # happen at this point in the loop
-                    self.skipped[host] = self.skipped.get(host, 0) + 1
-
+            self._compute_aggregrate_counts(poll_results, poll=True)
 
+            # mention which hosts we're going to poll again...
             for (host, host_result) in poll_results['contacted'].iteritems():
                 results['contacted'][host] = host_result
                 if not host in self.dark and not host in self.failures:
-                    # override last result with current status result for report
-                    # output if requested
                     self.callbacks.on_async_poll(jid, host, clock, host_result)
 
             # run down the clock
             clock = clock - async_poll_interval
             time.sleep(async_poll_interval)
-            # do not have to poll the completed hosts, smaller list
 
         # mark any hosts that are still listed as started as failed
         # since these likely got killed by async_wrapper
         for (host, host_result) in results['contacted'].iteritems():
             if 'started' in host_result:
                 results['contacted'][host] = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
 
         return results
 
+    # *****************************************************
+
     def _run_module(self, pattern, module, args, hosts, remote_user,
         async_seconds, async_poll_interval, only_if):
 
         ''' run a particular module step in a playbook '''
 
         runner = ansible.runner.Runner(
-            pattern=pattern,
-            groups=self.groups,
-            module_name=module,
-            module_args=args,
-            host_list=hosts,
-            forks=self.forks,
-            remote_pass=self.remote_pass,
-            module_path=self.module_path,
-            timeout=self.timeout,
-            remote_user=remote_user,
-            setup_cache=SETUP_CACHE,
-            basedir=self.basedir,
+            pattern=pattern, groups=self.groups, module_name=module,
+            module_args=args, host_list=hosts, forks=self.forks,
+            remote_pass=self.remote_pass, module_path=self.module_path,
+            timeout=self.timeout, remote_user=remote_user,
+            setup_cache=SETUP_CACHE, basedir=self.basedir,
             conditional=only_if
         )
@@ -312,13 +326,11 @@ class PlayBook(object):
 
         return rc
 
+    # *****************************************************
+
     def _run_task(self, pattern=None, task=None, host_list=None,
         remote_user=None, handlers=None, conditional=False):
-        '''
-        run a single task in the playbook and
-        recursively run any subtasks.
-        '''
+        ''' run a single task in the playbook and recursively run any subtasks. '''
 
         # do not continue to run tasks on hosts that have had failures
         host_list = self._prune_failed_hosts(host_list)
@@ -352,48 +364,7 @@ class PlayBook(object):
         if results is None:
             results = {}
 
-        # walk through the results and build up
-        # summary information about successes and
-        # failures. FIXME: TODO: split into subfunction!
-
-        dark = results.get("dark", {})
-        contacted = results.get("contacted", {})
-
-        for host, msg in dark.iteritems():
-            self.processed[host] = 1
-            self.callbacks.on_unreachable(host, msg)
-            if not host in self.dark:
-                self.dark[host] = 1
-            else:
-                self.dark[host] = self.dark[host] + 1
-
-        # FIXME: refactor
-        for host, results in contacted.iteritems():
-            self.processed[host] = 1
-
-            if utils.is_failed(results):
-                self.callbacks.on_failed(host, results)
-                if not host in self.failures:
-                    self.failures[host] = 1
-                else:
-                    self.failures[host] = self.failures[host] + 1
-            else:
-                self.callbacks.on_ok(host, results)
-                if not host in self.invocations:
-                    self.invocations[host] = 1
-                else:
-                    self.invocations[host] = self.invocations[host] + 1
-                if results.get('changed', False):
-                    if not host in self.changed:
-                        self.changed[host] = 1
-                    else:
-                        self.changed[host] = self.changed[host] + 1
-                # TODO: verify/test that async steps are skippable
-                if results.get('skipped', False):
-                    if not host in self.changed:
-                        self.skipped[host] = 1
-                    else:
-                        self.skipped[host] = self.skipped[host] + 1
+        self._compute_aggregrate_counts(results)
 
         # flag which notify handlers need to be run
         # this will be on a SUBSET of the actual host list. For instance
@@ -402,11 +373,13 @@ class PlayBook(object):
 
         subtasks = task.get('notify', [])
         if len(subtasks) > 0:
-            for host, results in contacted.iteritems():
+            for host, results in results.get('contacted',{}).iteritems():
                 if results.get('changed', False):
                     for subtask in subtasks:
                         self._flag_handler(handlers, subtask, host)
 
+    # *****************************************************
+
     def _flag_handler(self, handlers, match_name, host):
         '''
         if a task has any notify elements, flag handlers for run
@@ -422,12 +395,13 @@ class PlayBook(object):
             if name is None:
                 raise errors.AnsibleError('handler is missing a name')
             if match_name == name:
-                # flag the handler with the list of hosts
-                # it needs to be run on, it will be run later
+                # flag the handler with the list of hosts it needs to be run on, it will be run later
                 if not 'run' in x:
                     x['run'] = []
                 x['run'].append(host)
 
+    # *****************************************************
+
     def _do_setup_step(self, pattern, vars, user, host_list, vars_files=None):
         ''' push variables down to the systems and get variables+facts back up '''
 
@@ -489,31 +463,14 @@ class PlayBook(object):
 
         # push any variables down to the system
         setup_results = ansible.runner.Runner(
-            pattern=pattern,
-            groups=self.groups,
-            module_name='setup',
-            module_args=push_var_str,
-            host_list=self.host_list,
-            forks=self.forks,
-            module_path=self.module_path,
-            timeout=self.timeout,
-            remote_user=user,
-            remote_pass=self.remote_pass,
-            setup_cache=SETUP_CACHE
+            pattern=pattern, groups=self.groups, module_name='setup',
+            module_args=push_var_str, host_list=self.host_list,
+            forks=self.forks, module_path=self.module_path,
+            timeout=self.timeout, remote_user=user,
+            remote_pass=self.remote_pass, setup_cache=SETUP_CACHE
         ).run()
 
-        # FIXME: similar logic up in run_task, refactor
-        dark_hosts = setup_results.get('dark',{})
-        contacted_hosts = setup_results.get('contacted',{})
-        for (host, error) in dark_hosts.iteritems():
-            self.callbacks.on_dark_host(host, error)
-            self.dark[host] = 1
-        for (host, host_result) in contacted_hosts.iteritems():
-            if 'failed' in host_result:
-                self.callbacks.on_failed(host, host_result)
-                self.failures[host] = 1
-            else:
-                self.callbacks.on_ok(host, host_result)
+        self._compute_aggregrate_counts(setup_results, setup=True)
 
         # now for each result, load into the setup cache so we can
         # let runner template out future commands
@@ -526,10 +483,10 @@ class PlayBook(object):
         host_list = self._prune_failed_hosts(host_list)
         return host_list
 
+    # *****************************************************
+
     def _run_play(self, pg):
-        '''
-        run a list of tasks for a given pattern, in order
-        '''
+        ''' run a list of tasks for a given pattern, in order '''
 
         # get configuration information about the pattern
         pattern = pg['hosts']
@@ -32,6 +32,9 @@ class TestCallbacks(object):
     def on_setup_secondary(self):
         self.events.append([ 'secondary_setup' ])
 
+    def on_skipped(self, host):
+        self.events.append([ 'skipped', [ host ]])
+
     def on_import_for_host(self, host, filename):
         self.events.append([ 'import', [ host, filename ]])
 
@@ -47,8 +50,9 @@ class TestCallbacks(object):
     def on_failed(self, host, results):
         self.events.append([ 'failed', [ host, results ]])
 
-    def on_ok(self, host, host_result):
+    def on_ok(self, host, result):
         # delete certain info from host_result to make test comparisons easier
+        host_result = result.copy()
         for k in [ 'ansible_job_id', 'invocation', 'md5sum', 'delta', 'start', 'end' ]:
             if k in host_result:
                 del host_result[k]
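The on_ok change above copies the incoming result before stripping the volatile keys, presumably so the dict the PlayBook keeps using afterwards (for example the 'changed' flag that drives notify handlers) is not mutated by the test harness. A minimal illustration of the difference (values invented):

    # Why the .copy() matters: only the copy is trimmed for comparison.
    result = { 'changed' : True, 'delta' : '0:00:00.01', 'stdout' : 'hi' }
    host_result = result.copy()
    for k in [ 'delta', 'start', 'end' ]:
        if k in host_result:
            del host_result[k]
    assert 'delta' not in host_result   # trimmed copy used for comparisons
    assert 'delta' in result            # caller's dict left intact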
@@ -31,7 +31,7 @@
         "import",
         [
             "127.0.0.1",
-            "/home/mdehaan/ansible/test/default_os.yml"
+            "/home/mdehaan/ansible/test/CentOS.yml"
         ]
     ],
     [
@@ -210,6 +210,28 @@
             }
         ]
     ],
+    [
+        "task start",
+        [
+            "this should be skipped",
+            false
+        ]
+    ],
+    [
+        "skipped",
+        [
+            "127.0.0.1"
+        ]
+    ],
+    [
+        "ok",
+        [
+            "127.0.0.1",
+            {
+                "skipped": true
+            }
+        ]
+    ],
     [
         "task start",
         [
|
||||||
[
|
[
|
||||||
"127.0.0.1",
|
"127.0.0.1",
|
||||||
{
|
{
|
||||||
"cmd": [
|
"cmd": "echo this should fire once ",
|
||||||
"/bin/true"
|
|
||||||
],
|
|
||||||
"rc": 0,
|
"rc": 0,
|
||||||
"stderr": "",
|
"stderr": "",
|
||||||
"stdout": ""
|
"stdout": "this should fire once"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
],
|
],
|
||||||
|
@ -243,12 +263,10 @@
|
||||||
[
|
[
|
||||||
"127.0.0.1",
|
"127.0.0.1",
|
||||||
{
|
{
|
||||||
"cmd": [
|
"cmd": "echo this should fire once also ",
|
||||||
"/bin/true"
|
|
||||||
],
|
|
||||||
"rc": 0,
|
"rc": 0,
|
||||||
"stderr": "",
|
"stderr": "",
|
||||||
"stdout": ""
|
"stdout": "this should fire once also"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
]
|
]
|
||||||
|
@ -258,8 +276,9 @@
|
||||||
"changed": 2,
|
"changed": 2,
|
||||||
"dark": 0,
|
"dark": 0,
|
||||||
"failed": 0,
|
"failed": 0,
|
||||||
"resources": 9,
|
"resources": 12,
|
||||||
"skipped": 0
|
"skipped": 1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -1,9 +1,16 @@
 # extremely simple test of the most basic of playbook engine/functions
 ---
 - hosts: all
 
+  # the 'weasels' string should show up in the output
+
   vars:
     answer: "Wuh, I think so, Brain, but if we didn't have ears, we'd look like weasels."
     port: 5150
 
+  # we should have import events for common_vars and CentOS.yml (if run on CentOS)
+  # sorry, tests are a bit platform specific just for now
+
   vars_files:
     - common_vars.yml
     - [ '$facter_operatingsystem.yml', 'default_os.yml' ]
|
||||||
async: 10
|
async: 10
|
||||||
poll: 3
|
poll: 3
|
||||||
|
|
||||||
|
# the following command should be skipped
|
||||||
|
|
||||||
|
- name: this should be skipped
|
||||||
|
action: shell echo 'if you see this, this is wrong ($facter_operatingsystem)'
|
||||||
|
only_if: "'$facter_operatingsystem' == 'Imaginary'"
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|
||||||
# in the above test example, this should fire ONCE (at the end)
|
# in the above test example, this should fire ONCE (at the end)
|
||||||
- name: on change 1
|
- name: on change 1
|
||||||
action: command /bin/true
|
action: shell echo 'this should fire once'
|
||||||
|
|
||||||
# in the above test example, this should fire ONCE (at the end)
|
# in the above test example, this should fire ONCE (at the end)
|
||||||
|
|
||||||
- name: on change 2
|
- name: on change 2
|
||||||
action: command /bin/true
|
action: shell echo 'this should fire once also'
|
||||||
|
|
||||||
# in the above test example, this should NOT FIRE
|
# in the above test example, this should NOT FIRE
|
||||||
|
|
||||||
- name: on change 3
|
- name: on change 3
|
||||||
action: command /bin/true
|
action: shell echo 'if you see this, this is wrong'
|
||||||
|
|
||||||
|
|
||||||