Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)

Commit 0603b9c62c: Merge branch 'v2_fixing' into devel

Conflicts:
	v2/ansible/playbook/helpers.py

34 changed files with 505 additions and 426 deletions
@@ -1,43 +0,0 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

class HostLog:

    def __init__(self, host):
        self.host = host

    def add_task_result(self, task_result):
        pass

    def has_failures(self):
        assert False

    def has_changes(self):
        assert False

    def get_tasks(self, are_executed=None, are_changed=None, are_successful=None):
        assert False

    def get_current_running_task(self)
        # atomic decorator likely required?
        assert False
@@ -1,29 +0,0 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

class HostLogManager:

    def __init__(self):
        pass

    def get_log_for_host(self, host):
        assert False
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import *
from ansible.playbook.block import Block
from ansible.playbook.task import Task

from ansible.utils.boolean import boolean

@@ -38,9 +39,10 @@ class HostState:
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
self.child_state = None

def __repr__(self):
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s" % (
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, child state? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,

@@ -49,6 +51,7 @@ class HostState:
self.run_state,
self.fail_state,
self.pending_setup,
self.child_state,
)

def get_current_block(self):

@@ -64,6 +67,7 @@ class HostState:
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
new_state.child_state = self.child_state
return new_state

class PlayIterator:

@@ -104,75 +108,35 @@ class PlayIterator:
except KeyError:
raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)

def get_next_task_for_host(self, host, peek=False, lock_step=True):
def get_next_task_for_host(self, host, peek=False):

s = self.get_host_state(host)

task = None
if s.run_state == self.ITERATING_COMPLETE:
return None
else:
while True:
try:
cur_block = s._blocks[s.cur_block]
except IndexError:
s.run_state = self.ITERATING_COMPLETE
break
elif s.run_state == self.ITERATING_SETUP:
s.run_state = self.ITERATING_TASKS
s.pending_setup = True
if self._play.gather_facts == 'smart' and not host._gathered_facts or boolean(self._play.gather_facts):
if not peek:
# mark the host as having gathered facts
host.set_gathered_facts(True)

if s.run_state == self.ITERATING_SETUP:
s.run_state = self.ITERATING_TASKS
if self._play._gather_facts == 'smart' and not host.gathered_facts or boolean(self._play._gather_facts):
# mark the host as having gathered facts
host.set_gathered_facts(True)
task = Task()
task.action = 'setup'
task.args = {}
task.set_loader(self._play._loader)
else:
s.pending_setup = False

task = Task()
task.action = 'setup'
task.set_loader(self._play._loader)

elif s.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if s.pending_setup:
s.pending_setup = False

if s.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
s.run_state = self.ITERATING_RESCUE
elif s.cur_regular_task >= len(cur_block.block):
s.run_state = self.ITERATING_ALWAYS
else:
task = cur_block.block[s.cur_regular_task]
s.cur_regular_task += 1
break
elif s.run_state == self.ITERATING_RESCUE:
if s.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
s.run_state = self.ITERATING_ALWAYS
elif s.cur_rescue_task >= len(cur_block.rescue):
if len(cur_block.rescue) > 0:
s.fail_state = self.FAILED_NONE
s.run_state = self.ITERATING_ALWAYS
else:
task = cur_block.rescue[s.cur_rescue_task]
s.cur_rescue_task += 1
break
elif s.run_state == self.ITERATING_ALWAYS:
if s.cur_always_task >= len(cur_block.always):
if s.fail_state != self.FAILED_NONE:
s.run_state = self.ITERATING_COMPLETE
break
else:
s.cur_block += 1
s.cur_regular_task = 0
s.cur_rescue_task = 0
s.cur_always_task = 0
s.run_state = self.ITERATING_TASKS
else:
task= cur_block.always[s.cur_always_task]
s.cur_always_task += 1
break
if not task:
(s, task) = self._get_next_task_from_state(s, peek=peek)

if task and task._role:
# if we had a current role, mark that role as completed
if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
s.cur_role._completed = True

s.cur_role = task._role

if not peek:

@@ -180,6 +144,86 @@ class PlayIterator:

return (s, task)

def _get_next_task_from_state(self, state, peek):

task = None

# if we previously encountered a child block and we have a
# saved child state, try and get the next task from there
if state.child_state:
(state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
if task:
return (state.child_state, task)
else:
state.child_state = None

# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = self.ITERATING_COMPLETE
return (state, None)

if state.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False

if state.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
state.run_state = self.ITERATING_RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = self.ITERATING_ALWAYS
else:
task = block.block[state.cur_regular_task]
state.cur_regular_task += 1

elif state.run_state == self.ITERATING_RESCUE:
if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
state.run_state = self.ITERATING_ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = self.FAILED_NONE
state.run_state = self.ITERATING_ALWAYS
else:
task = block.rescue[state.cur_rescue_task]
state.cur_rescue_task += 1

elif state.run_state == self.ITERATING_ALWAYS:
if state.cur_always_task >= len(block.always):
if state.fail_state != self.FAILED_NONE:
state.run_state = self.ITERATING_COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = self.ITERATING_TASKS
state.child_state = None
else:
task = block.always[state.cur_always_task]
state.cur_always_task += 1

elif state.run_state == self.ITERATING_COMPLETE:
return (state, None)

# if the current task is actually a child block, we dive into it
if isinstance(task, Block):
state.child_state = HostState(blocks=[task])
state.child_state.run_state = self.ITERATING_TASKS
state.child_state.cur_role = state.cur_role
(state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)

# if something above set the task, break out of the loop now
if task:
break

return (state, task)

def mark_host_failed(self, host):
s = self.get_host_state(host)
if s.pending_setup:

@@ -206,25 +250,41 @@ class PlayIterator:
the different processes, and not all data structures are preserved. This method
allows us to find the original task passed into the executor engine.
'''
def _search_block(block, task):
for t in block.block:
if isinstance(t, Block):
res = _search_block(t, task)
if res:
return res
elif t._uuid == task._uuid:
return t
for t in block.rescue:
if isinstance(t, Block):
res = _search_block(t, task)
if res:
return res
elif t._uuid == task._uuid:
return t
for t in block.always:
if isinstance(t, Block):
res = _search_block(t, task)
if res:
return res
elif t._uuid == task._uuid:
return t
return None

s = self.get_host_state(host)
for block in s._blocks:
if block.block:
for t in block.block:
if t._uuid == task._uuid:
return t
if block.rescue:
for t in block.rescue:
if t._uuid == task._uuid:
return t
if block.always:
for t in block.always:
if t._uuid == task._uuid:
return t
res = _search_block(block, task)
if res:
return res

return None

def add_tasks(self, host, task_list):
s = self.get_host_state(host)
target_block = s._blocks[s.cur_block].copy()
target_block = s._blocks[s.cur_block].copy(exclude_parent=True)

if s.run_state == self.ITERATING_TASKS:
before = target_block.block[:s.cur_regular_task]
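The refactored iterator above walks each host through a block's block/rescue/always sections: run_state selects the section, and fail_state is a bitmask, so "fail_state & FAILED_TASKS == FAILED_TASKS" diverts the host into rescue, and completing a non-empty rescue resets fail_state before always runs. A small standalone sketch of that progression, using made-up flag values rather than the real PlayIterator constants:

    # Illustrative only: simplified flags, not the real PlayIterator constants.
    FAILED_NONE, FAILED_TASKS, FAILED_RESCUE = 0, 2, 4

    def next_section(fail_state, finished):
        """Hypothetical helper: pick the next section of one block to run."""
        if fail_state & FAILED_TASKS and 'rescue' not in finished:
            return 'rescue'                 # a task failure diverts into rescue
        if not fail_state and 'tasks' not in finished:
            return 'tasks'
        return 'always'                     # always runs regardless of failures

    print(next_section(FAILED_NONE, set()))         # -> tasks
    print(next_section(FAILED_TASKS, set()))        # -> rescue
    print(next_section(FAILED_TASKS, {'rescue'}))   # -> always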
@@ -26,6 +26,7 @@ from ansible.errors import *
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook

from ansible.utils.color import colorize, hostcolor
from ansible.utils.debug import debug

class PlaybookExecutor:

@@ -70,8 +71,8 @@ class PlaybookExecutor:

for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
self._tqm._callback.playbook_on_play_start(new_play.name)
self._tqm._callback.playbook_on_no_hosts_matched()
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
result = 0
break
# restrict the inventory to the hosts in the serialized batch

@@ -90,6 +91,36 @@ class PlaybookExecutor:
raise

self._cleanup()

# FIXME: this stat summary stuff should be cleaned up and moved
# to a new method, if it even belongs here...
self._tqm._display.banner("PLAY RECAP")

hosts = sorted(self._tqm._stats.processed.keys())
for h in hosts:
t = self._tqm._stats.summarize(h)

self._tqm._display.display("%s : %s %s %s %s" % (
hostcolor(h, t),
colorize('ok', t['ok'], 'green'),
colorize('changed', t['changed'], 'yellow'),
colorize('unreachable', t['unreachable'], 'red'),
colorize('failed', t['failures'], 'red')),
screen_only=True
)

self._tqm._display.display("%s : %s %s %s %s" % (
hostcolor(h, t, False),
colorize('ok', t['ok'], None),
colorize('changed', t['changed'], None),
colorize('unreachable', t['unreachable'], None),
colorize('failed', t['failures'], None)),
log_only=True
)

self._tqm._display.display("", screen_only=True)
# END STATS STUFF

return result

def _cleanup(self, signum=None, framenum=None):
v2/ansible/executor/stats.py (new file, 51 lines)

@@ -0,0 +1,51 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

class AggregateStats:
    ''' holds stats about per-host activity during playbook runs '''

    def __init__(self):

        self.processed = {}
        self.failures = {}
        self.ok = {}
        self.dark = {}
        self.changed = {}
        self.skipped = {}

    def increment(self, what, host):
        ''' helper function to bump a statistic '''

        self.processed[host] = 1
        prev = (getattr(self, what)).get(host, 0)
        getattr(self, what)[host] = prev+1

    def summarize(self, host):
        ''' return information about a particular host '''

        return dict(
            ok = self.ok.get(host, 0),
            failures = self.failures.get(host, 0),
            unreachable = self.dark.get(host,0),
            changed = self.changed.get(host, 0),
            skipped = self.skipped.get(host, 0)
        )
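The new AggregateStats class is a plain per-host counter store: increment() marks the host as processed and bumps the named counter dictionary, and summarize() flattens the counts for the PLAY RECAP output in PlaybookExecutor above. An illustrative use (the host names are made up; the import path matches the new file):

    from ansible.executor.stats import AggregateStats

    stats = AggregateStats()
    stats.increment('ok', 'web01')        # marks web01 as processed, bumps its 'ok' count
    stats.increment('changed', 'web01')
    stats.increment('failures', 'db01')

    print(stats.summarize('web01'))
    # {'ok': 1, 'failures': 0, 'unreachable': 0, 'changed': 1, 'skipped': 0}
    print(sorted(stats.processed))        # ['db01', 'web01']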
@@ -238,10 +238,14 @@ class TaskExecutor:
if self._task.poll > 0:
result = self._poll_async_result(result=result)

# update the local copy of vars with the registered value, if specified
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
vars_copy[self._task.register] = result

if 'ansible_facts' in result:
vars_copy.update(result['ansible_facts'])

# create a conditional object to evaluate task conditions
cond = Conditional(loader=self._loader)

@@ -267,6 +271,15 @@ class TaskExecutor:
if attempt < retries - 1:
time.sleep(delay)

# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self._task.register:
variables[self._task.register] = result

if 'ansible_facts' in result:
variables.update(result['ansible_facts'])

# and return
debug("attempt loop complete, returning result")
return result
@@ -29,9 +29,11 @@ from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.plugins import callback_loader, strategy_loader

from ansible.utils.debug import debug
from ansible.utils.display import Display

__all__ = ['TaskQueueManager']

@@ -53,6 +55,9 @@ class TaskQueueManager:
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self._stats = AggregateStats()

self._display = Display()

# a special flag to help us exit cleanly
self._terminated = False

@@ -66,9 +71,14 @@ class TaskQueueManager:

self._final_q = multiprocessing.Queue()

# FIXME: hard-coded the default callback plugin here, which
# should be configurable.
self._callback = callback_loader.get(callback)
# load all available callback plugins
# FIXME: we need an option to white-list callback plugins
self._callback_plugins = []
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
self._callback_plugins.append(callback_plugin(self._display))
else:
self._callback_plugins.append(callback_plugin())

# create the pool of worker threads, based on the number of forks specified
try:

@@ -131,16 +141,11 @@ class TaskQueueManager:
'''

connection_info = ConnectionInformation(play, self._options)
self._callback.set_connection_info(connection_info)
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_connection_info'):
callback_plugin.set_connection_info(connection_info)

# run final validation on the play now, to make sure fields are templated
# FIXME: is this even required? Everything is validated and merged at the
# task level, so else in the play needs to be templated
#all_vars = self._vmw.get_vars(loader=self._dlw, play=play)
#all_vars = self._vmw.get_vars(loader=self._loader, play=play)
#play.post_validate(all_vars=all_vars)

self._callback.playbook_on_play_start(play.name)
self.send_callback('v2_playbook_on_play_start', play)

# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(play.handlers)

@@ -172,9 +177,6 @@ class TaskQueueManager:
def get_inventory(self):
return self._inventory

def get_callback(self):
return self._callback

def get_variable_manager(self):
return self._variable_manager

@@ -201,3 +203,18 @@ class TaskQueueManager:

def terminate(self):
self._terminated = True

def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
methods = [
getattr(callback_plugin, method_name, None),
getattr(callback_plugin, 'on_any', None)
]
for method in methods:
if method is not None:
method(*args, **kwargs)
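send_callback() above fans every event out to each loaded callback plugin: it skips plugins that set disabled, then calls both the plugin's method of that name and its on_any catch-all when they exist. A rough sketch of a v2-style plugin that would receive those events (modeled on the default plugin shown later in this diff; treat it as illustrative rather than drop-in code):

    from ansible.plugins.callback import CallbackBase

    class CallbackModule(CallbackBase):
        # Plugins reporting CALLBACK_VERSION >= 2.0 are constructed with the
        # shared Display object (see the callback_loader loop above).
        CALLBACK_VERSION = 2.0

        def v2_playbook_on_play_start(self, play):
            name = play.get_name().strip()
            if name:
                self._display.banner("PLAY [%s]" % name)
            else:
                self._display.banner("PLAY")

        def v2_runner_on_ok(self, result):
            self._display.display("ok: [%s]" % result._host.get_name())

        def on_any(self, *args, **kwargs):
            # send_callback() also invokes this catch-all for every event
            pass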
@@ -1 +1 @@
Subproject commit 400166a655b304094005aace178d0fab1cfe9763
Subproject commit 46e316a20a92b5a54b982eddb301eb3d57da397e
@@ -99,11 +99,14 @@ class DataLoader():
def path_exists(self, path):
return os.path.exists(path)

def is_file(self, path):
return os.path.isfile(path)

def is_directory(self, path):
return os.path.isdir(path)

def is_file(self, path):
return os.path.isfile(path)
def list_directory(self, path):
return os.path.listdir(path)

def _safe_load(self, stream, file_name=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
@@ -43,6 +43,7 @@ class Block(Base, Become, Conditional, Taggable):
self._task_include = task_include
self._use_handlers = use_handlers
self._dep_chain = []
self._vars = dict()

super(Block, self).__init__()

@@ -56,9 +57,12 @@ class Block(Base, Become, Conditional, Taggable):

if self._role:
all_vars.update(self._role.get_vars())
if self._parent_block:
all_vars.update(self._parent_block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())

all_vars.update(self._vars)
return all_vars

@staticmethod

@@ -131,25 +135,29 @@ class Block(Base, Become, Conditional, Taggable):
# use_handlers=self._use_handlers,
# )

def copy(self):
def copy(self, exclude_parent=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
new_task = task.copy(exclude_block=True)
new_task._block = new_block
if isinstance(task, Block):
new_task = task.copy(exclude_parent=True)
new_task._parent_block = new_block
else:
new_task = task.copy(exclude_block=True)
new_task._block = new_block
new_task_list.append(new_task)
return new_task_list

new_me = super(Block, self).copy()
new_me._use_handlers = self._use_handlers
new_me._dep_chain = self._dep_chain[:]
new_me._dep_chain = self._dep_chain[:]

new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)

new_me._parent_block = None
if self._parent_block:
if self._parent_block and not exclude_parent:
new_me._parent_block = self._parent_block.copy()

new_me._role = None

@@ -260,7 +268,7 @@ class Block(Base, Become, Conditional, Taggable):
value = self._attributes[attr]
if not value:
if self._parent_block:
value = getattr(self._block, attr)
value = getattr(self._parent_block, attr)
elif self._role:
value = getattr(self._role, attr)
if not value and len(self._dep_chain):
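The copy(exclude_parent=False) change above lets the iterator duplicate just the current block without re-copying its whole ancestor chain, while _dupe_task_list now recurses into nested Block entries and re-parents them onto the copy instead of treating them as plain tasks. A condensed paraphrase of that wiring, using stand-in classes rather than the real Block/Task types:

    # Stand-in classes: only the parent/child bookkeeping from Block.copy() is shown.
    class MiniTask:
        def __init__(self, name):
            self.name = name
            self._block = None

        def copy(self, exclude_block=False):
            new = MiniTask(self.name)
            if not exclude_block:
                new._block = self._block
            return new

    class MiniBlock:
        def __init__(self, entries=None, parent=None):
            self.block = list(entries or [])
            self._parent_block = parent

        def copy(self, exclude_parent=False):
            new = MiniBlock()
            for entry in self.block:
                if isinstance(entry, MiniBlock):
                    child = entry.copy(exclude_parent=True)   # nested block: skip its ancestors
                    child._parent_block = new                 # ...and re-parent onto the copy
                else:
                    child = entry.copy(exclude_block=True)    # plain task: point it at the new block
                    child._block = new
                new.block.append(child)
            if self._parent_block and not exclude_parent:
                new._parent_block = self._parent_block.copy()
            return new

    outer = MiniBlock([MiniTask('t1'), MiniBlock([MiniTask('t2')])])
    dup = outer.copy(exclude_parent=True)
    assert dup.block[1]._parent_block is dup      # the nested copy hangs off the new block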
@@ -60,9 +60,9 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler
'''

# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
#from ansible.playbook.task_include import TaskInclude

assert type(ds) == list

@@ -71,27 +71,17 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler
if not isinstance(task, dict):
raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds)

#if 'include' in task:
# cur_basedir = None
# if isinstance(task, AnsibleBaseYAMLObject) and loader:
# pos_info = task.ansible_pos
# new_basedir = os.path.dirname(pos_info[0])
# cur_basedir = loader.get_basedir()
# loader.set_basedir(new_basedir)

# t = TaskInclude.load(
# task,
# block=block,
# role=role,
# task_include=task_include,
# use_handlers=use_handlers,
# loader=loader
# )

# if cur_basedir and loader:
# loader.set_basedir(cur_basedir)
#else:
if True:
if 'block' in task:
t = Block.load(
task,
parent_block=block,
role=role,
task_include=task_include,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader,
)
else:
if use_handlers:
t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
else:

@@ -120,15 +110,3 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader

return roles

def compile_block_list(block_list):
'''
Given a list of blocks, compile them into a flat list of tasks
'''

task_list = []

for block in block_list:
task_list.extend(block.compile())

return task_list
@@ -24,7 +24,7 @@ from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable

@@ -32,7 +32,7 @@ from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
@@ -78,7 +78,7 @@ class Task(Base, Conditional, Taggable, Become):
# FIXME: this should not be a Task
_meta = FieldAttribute(isa='string')

_name = FieldAttribute(isa='string')
_name = FieldAttribute(isa='string', default='')

_no_log = FieldAttribute(isa='bool')
_notify = FieldAttribute(isa='list')

@@ -167,7 +167,6 @@ class Task(Base, Conditional, Taggable, Become):
args_parser = ModuleArgsParser(task_ds=ds)
(action, args, delegate_to) = args_parser.parse()

new_ds['action'] = action
new_ds['args'] = args
new_ds['delegate_to'] = delegate_to

@@ -199,6 +198,8 @@ class Task(Base, Conditional, Taggable, Become):

def get_vars(self):
all_vars = self.vars.copy()
if self._block:
all_vars.update(self._block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())
@@ -240,7 +240,10 @@ class PluginLoader:
continue
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
if kwargs.get('class_only', False):
yield getattr(self._module_cache[path], self.class_name)
else:
yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)

action_loader = PluginLoader(
'ActionModule',
@@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.utils.display import Display
#from ansible.utils.display import Display

__all__ = ["CallbackBase"]

@@ -34,8 +34,8 @@ class CallbackBase:
# FIXME: the list of functions here needs to be updated once we have
# finalized the list of callback methods used in the default callback

def __init__(self):
self._display = Display()
def __init__(self, display):
self._display = display

def set_connection_info(self, conn_info):
# FIXME: this is a temporary hack, as the connection info object
@@ -30,25 +30,15 @@ class CallbackModule(CallbackBase):
to stdout when new callback events are received.
'''

def _print_banner(self, msg, color=None):
'''
Prints a header-looking line with stars taking up to 80 columns
of width (3 columns, minimum)
'''
msg = msg.strip()
star_len = (80 - len(msg))
if star_len < 0:
star_len = 3
stars = "*" * star_len
self._display.display("\n%s %s" % (msg, stars), color=color)
CALLBACK_VERSION = 2.0

def on_any(self, *args, **kwargs):
def v2_on_any(self, *args, **kwargs):
pass

def runner_on_failed(self, task, result, ignore_errors=False):
def v2_runner_on_failed(self, result, ignore_errors=False):
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red')

def runner_on_ok(self, task, result):
def v2_runner_on_ok(self, result):

if result._task.action == 'include':
msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)

@@ -68,7 +58,7 @@ class CallbackModule(CallbackBase):
msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
self._display.display(msg, color=color)

def runner_on_skipped(self, task, result):
def v2_runner_on_skipped(self, result):
msg = "skipping: [%s]" % result._host.get_name()
if self._display._verbosity > 0 or 'verbose_always' in result._result:
indent = None

@@ -78,57 +68,66 @@ class CallbackModule(CallbackBase):
msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
self._display.display(msg, color='cyan')

def runner_on_unreachable(self, task, result):
def v2_runner_on_unreachable(self, result):
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red')

def runner_on_no_hosts(self, task):
def v2_runner_on_no_hosts(self, task):
pass

def runner_on_async_poll(self, host, res, jid, clock):
def v2_runner_on_async_poll(self, result):
pass

def runner_on_async_ok(self, host, res, jid):
def v2_runner_on_async_ok(self, result):
pass

def runner_on_async_failed(self, host, res, jid):
def v2_runner_on_async_failed(self, result):
pass

def playbook_on_start(self):
def v2_runner_on_file_diff(self, result, diff):
pass

def playbook_on_notify(self, host, handler):
def v2_playbook_on_start(self):
pass

def playbook_on_no_hosts_matched(self):
def v2_playbook_on_notify(self, result, handler):
pass

def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color='cyan')

def playbook_on_no_hosts_remaining(self):
self._print_banner("NO MORE HOSTS LEFT")
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")

def playbook_on_task_start(self, name, is_conditional):
self._print_banner("TASK [%s]" % name.strip())
def v2_playbook_on_task_start(self, task, is_conditional):
self._display.banner("TASK [%s]" % task.get_name().strip())

def playbook_on_cleanup_task_start(self, name):
self._print_banner("CLEANUP TASK [%s]" % name.strip())
def v2_playbook_on_cleanup_task_start(self, task):
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())

def playbook_on_handler_task_start(self, name):
self._print_banner("RUNNING HANDLER [%s]" % name.strip())
def v2_playbook_on_handler_task_start(self, task):
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())

def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass

def playbook_on_setup(self):
def v2_playbook_on_setup(self):
pass

def playbook_on_import_for_host(self, host, imported_file):
def v2_playbook_on_import_for_host(self, result, imported_file):
pass

def playbook_on_not_import_for_host(self, host, missing_file):
def v2_playbook_on_not_import_for_host(self, result, missing_file):
pass

def playbook_on_play_start(self, name):
self._print_banner("PLAY [%s]" % name.strip())
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = "PLAY"
else:
msg = "PLAY [%s]" % name

def playbook_on_stats(self, stats):
self._display.banner(name)

def v2_playbook_on_stats(self, stats):
pass
@@ -31,6 +31,8 @@ class CallbackModule(CallbackBase):
to stdout when new callback events are received.
'''

CALLBACK_VERSION = 2.0

def _print_banner(self, msg):
'''
Prints a header-looking line with stars taking up to 80 columns
@@ -28,7 +28,7 @@ from ansible.inventory.host import Host
from ansible.inventory.group import Group

from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role import ROLE_CACHE, hash_params
from ansible.plugins import module_loader
from ansible.utils.debug import debug

@@ -49,7 +49,7 @@ class StrategyBase:
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm.get_notified_handlers()
self._callback = tqm.get_callback()
#self._callback = tqm.get_callback()
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q

@@ -73,6 +73,9 @@ class StrategyBase:
debug("running handlers")
result &= self.run_handlers(iterator, connection_info)

# send the stats callback
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

if not result:
if num_unreachable > 0:
return 3

@@ -84,7 +87,7 @@ class StrategyBase:
return 0

def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.get_name() not in self._tqm._unreachable_hosts]
return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]

def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]

@@ -132,17 +135,23 @@ class StrategyBase:
task = task_result._task
if result[0] == 'host_task_failed':
if not task.ignore_errors:
debug("marking %s as failed" % host.get_name())
debug("marking %s as failed" % host.name)
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.get_name()] = True
self._callback.runner_on_failed(task, task_result)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result)
elif result[0] == 'host_unreachable':
self._tqm._unreachable_hosts[host.get_name()] = True
self._callback.runner_on_unreachable(task, task_result)
self._tqm._unreachable_hosts[host.name] = True
self._tqm._stats.increment('dark', host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif result[0] == 'host_task_skipped':
self._callback.runner_on_skipped(task, task_result)
self._tqm._stats.increment('skipped', host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
elif result[0] == 'host_task_ok':
self._callback.runner_on_ok(task, task_result)
self._tqm._stats.increment('ok', host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', host.name)
self._tqm.send_callback('v2_runner_on_ok', task_result)

self._pending_results -= 1
if host.name in self._blocked_hosts:

@@ -160,22 +169,6 @@ class StrategyBase:

ret_results.append(task_result)

#elif result[0] == 'include':
# host = result[1]
# task = result[2]
# include_file = result[3]
# include_vars = result[4]
#
# if isinstance(task, Handler):
# # FIXME: figure out how to make includes work for handlers
# pass
# else:
# original_task = iterator.get_original_task(host, task)
# if original_task and original_task._role:
# include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file)
# new_tasks = self._load_included_file(original_task, include_file, include_vars)
# iterator.add_tasks(host, new_tasks)

elif result[0] == 'add_host':
task_result = result[1]
new_host_info = task_result.get('add_host', dict())

@@ -322,14 +315,11 @@ class StrategyBase:
loader=self._loader
)

task_list = compile_block_list(block_list)

# set the vars for this task from those specified as params to the include
for t in task_list:
t.vars = included_file._args.copy()
for b in block_list:
b._vars = included_file._args.copy()

return task_list
return block_list

def cleanup(self, iterator, connection_info):
'''

@@ -361,7 +351,7 @@ class StrategyBase:
while work_to_do:
work_to_do = False
for host in failed_hosts:
host_name = host.get_name()
host_name = host.name

if host_name in self._tqm._failed_hosts:
iterator.mark_host_failed(host)

@@ -377,7 +367,7 @@ class StrategyBase:
self._blocked_hosts[host_name] = True
task = iterator.get_next_task_for_host(host)
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
self._callback.playbook_on_cleanup_task_start(task.get_name())
self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task)
self._queue_task(host, task, task_vars, connection_info)

self._process_pending_results(iterator)

@@ -398,31 +388,28 @@ class StrategyBase:
# FIXME: getting the handlers from the iterators play should be
# a method on the iterator, which may also filter the list
# of handlers based on the notified list
handlers = compile_block_list(iterator._play.handlers)

debug("handlers are: %s" % handlers)
for handler in handlers:
handler_name = handler.get_name()

if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
if not len(self.get_hosts_remaining(iterator._play)):
self._callback.playbook_on_no_hosts_remaining()
result = False
break

self._callback.playbook_on_handler_task_start(handler_name)
for host in self._notified_handlers[handler_name]:
if not handler.has_triggered(host):
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
self._queue_task(host, handler, task_vars, connection_info)
handler.flag_for_host(host)

self._process_pending_results(iterator)

self._wait_on_pending_results(iterator)

# wipe the notification list
self._notified_handlers[handler_name] = []

debug("done running handlers, result is: %s" % result)
for handler_block in iterator._play.handlers:
debug("handlers are: %s" % handlers)
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
handler_name = handler.get_name()
if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
if not len(self.get_hosts_remaining(iterator._play)):
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
for host in self._notified_handlers[handler_name]:
if not handler.has_triggered(host):
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
self._queue_task(host, handler, task_vars, connection_info)
handler.flag_for_host(host)
self._process_pending_results(iterator)
self._wait_on_pending_results(iterator)
# wipe the notification list
self._notified_handlers[handler_name] = []
debug("done running handlers, result is: %s" % result)
return result
@@ -21,6 +21,7 @@ __metaclass__ = type

from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase

@@ -52,6 +53,9 @@ class StrategyModule(StrategyBase):
lowest_cur_block = len(iterator._blocks)

for (k, v) in host_tasks.iteritems():
if v is None:
continue

(s, t) = v
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block

@@ -131,7 +135,7 @@ class StrategyModule(StrategyBase):
debug("done getting the remaining hosts for this loop")
if len(hosts_left) == 0:
debug("out of hosts to run on")
self._callback.playbook_on_no_hosts_remaining()
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break

@@ -184,7 +188,6 @@ class StrategyModule(StrategyBase):
meta_action = task.args.get('_raw_params')
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
print("%s => NOOP" % host)
continue
elif meta_action == 'flush_handlers':
self.run_handlers(iterator, connection_info)

@@ -192,7 +195,7 @@ class StrategyModule(StrategyBase):
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
else:
if not callback_sent:
self._callback.playbook_on_task_start(task.get_name(), False)
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
callback_sent = True

self._blocked_hosts[host.get_name()] = True

@@ -234,6 +237,10 @@ class StrategyModule(StrategyBase):
include_results = [ res._result ]

for include_result in include_results:
# if the task result was skipped or failed, continue
if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
continue

original_task = iterator.get_original_task(res._host, res._task)
if original_task and original_task._role:
include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])

@@ -263,27 +270,31 @@ class StrategyModule(StrategyBase):
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)

all_tasks = dict((host, []) for host in hosts_left)
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_tasks = self._load_included_file(included_file)
new_blocks = self._load_included_file(included_file)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
# FIXME: callback here?
print(e)

noop_tasks = [noop_task for t in new_tasks]
for host in hosts_left:
if host in included_file._hosts:
all_tasks[host].extend(new_tasks)
else:
all_tasks[host].extend(noop_tasks)
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
all_blocks[host].append(new_block)
else:
all_blocks[host].append(noop_block)

for host in hosts_left:
iterator.add_tasks(host, all_tasks[host])
iterator.add_tasks(host, all_blocks[host])

debug("results queue empty")
except (IOError, EOFError), e:
@@ -70,6 +70,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False,
default=None)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)

if subset_opts:
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
@@ -73,3 +73,20 @@ def stringc(text, color):

# --- end "pretty"

def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
if num != 0 and ANSIBLE_COLOR and color is not None:
return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
else:
return "%s=%-4s" % (lead, str(num))

def hostcolor(host, stats, color=True):
if ANSIBLE_COLOR and color:
if stats['failures'] != 0 or stats['unreachable'] != 0:
return "%-37s" % stringc(host, 'red')
elif stats['changed'] != 0:
return "%-37s" % stringc(host, 'yellow')
else:
return "%-37s" % stringc(host, 'green')
return "%-26s" % host
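These two helpers produce the per-host PLAY RECAP line assembled in PlaybookExecutor above: hostcolor() picks red/yellow/green from the failure and change counts (or a plain padded name when color is off), and colorize() renders each name=count cell, only coloring non-zero counts. An illustrative call with color disabled, so the plain fallback formatting is what prints (the host name and counts are made up):

    from ansible.utils.color import colorize, hostcolor

    t = {'ok': 3, 'changed': 1, 'unreachable': 0, 'failures': 0}
    line = "%s : %s %s %s %s" % (
        hostcolor('web01', t, False),              # width-padded plain host name
        colorize('ok', t['ok'], None),             # "ok=3   " (color=None forces the plain branch)
        colorize('changed', t['changed'], None),
        colorize('unreachable', t['unreachable'], None),
        colorize('failed', t['failures'], None),
    )
    print(line)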
@@ -112,3 +112,15 @@ class Display:
if C.SYSTEM_WARNINGS:
self._warning(msg)

def banner(self, msg, color=None):
'''
Prints a header-looking line with stars taking up to 80 columns
of width (3 columns, minimum)
'''
msg = msg.strip()
star_len = (80 - len(msg))
if star_len < 0:
star_len = 3
stars = "*" * star_len
self.display("\n%s %s" % (msg, stars), color=color)
@@ -162,10 +162,9 @@ class VariableManager:
all_vars = self._combine_vars(all_vars, self._group_vars_files['all'])

for group in host.get_groups():
group_name = group.get_name()
all_vars = self._combine_vars(all_vars, group.get_vars())
if group_name in self._group_vars_files and group_name != 'all':
all_vars = self._combine_vars(all_vars, self._group_vars_files[group_name])
if group.name in self._group_vars_files and group.name != 'all':
all_vars = self._combine_vars(all_vars, self._group_vars_files[group.name])

host_name = host.get_name()
if host_name in self._host_vars_files:

@@ -228,7 +227,7 @@ class VariableManager:
'''

(name, ext) = os.path.splitext(os.path.basename(path))
if ext not in ('yml', 'yaml'):
if ext not in ('.yml', '.yaml'):
return os.path.basename(path)
else:
return name

@@ -239,11 +238,11 @@ class VariableManager:
basename of the file without the extension
'''

if os.path.isdir(path):
if loader.is_directory(path):
data = dict()

try:
names = os.listdir(path)
names = loader.list_directory(path)
except os.error, err:
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))

@@ -270,7 +269,7 @@ class VariableManager:
the extension, for matching against a given inventory host name
'''

if os.path.exists(path):
if loader.path_exists(path):
(name, data) = self._load_inventory_file(path, loader)
self._host_vars_files[name] = data

@@ -281,7 +280,7 @@ class VariableManager:
the extension, for matching against a given inventory host name
'''

if os.path.exists(path):
if loader.path_exists(path):
(name, data) = self._load_inventory_file(path, loader)
self._group_vars_files[name] = data
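The ('.yml', '.yaml') fix above matters because os.path.splitext() keeps the leading dot on the extension, so the old comparison against ('yml', 'yaml') could never match and vars files always fell through to the bare basename. A quick check of the behavior the fix relies on:

    import os.path

    name, ext = os.path.splitext(os.path.basename("group_vars/somegroup.yml"))
    print(name, ext)                  # somegroup .yml
    print(ext in ('yml', 'yaml'))     # False: the old comparison never matched
    print(ext in ('.yml', '.yaml'))   # True: the corrected comparison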
@@ -1,4 +1,4 @@
- debug: msg="this is the include, a=={{a}}"
- debug: msg="this is the second debug in the include"
- debug: msg="this is the third debug in the include, and a is still {{a}}"
#- debug: msg="this is the second debug in the include"
#- debug: msg="this is the third debug in the include, and a is still {{a}}"

v2/samples/localhost_include.yml (new file, 3 lines)

@@ -0,0 +1,3 @@
- debug: msg="this is the localhost include"
- include: common_include.yml
@@ -6,3 +6,8 @@
- block:
- block:
- debug: msg="are we there yet?"
always:
- debug: msg="a random always block"
- fail:
rescue:
- debug: msg="rescuing from the fail"

@@ -19,7 +19,7 @@
always:
- include: include.yml a=always

handlers:
#handlers:
#- name: foo
# include: include.yml a="this is a handler"
@@ -47,6 +47,9 @@ class DictDataLoader(DataLoader):
def is_directory(self, path):
return path in self._known_directories

def list_directory(self, path):
return [x for x in self._known_directories]

def _add_known_directory(self, directory):
if directory not in self._known_directories:
self._known_directories.append(directory)
@@ -75,9 +75,3 @@ class TestBlock(unittest.TestCase):
self.assertEqual(len(b.block), 1)
assert isinstance(b.block[0], Task)

def test_block_compile(self):
ds = [dict(action='foo')]
b = Block.load(ds)
tasks = b.compile()
self.assertEqual(len(tasks), 1)
self.assertIsInstance(tasks[0], Task)
@@ -24,6 +24,7 @@ from ansible.compat.tests.mock import patch, MagicMock

from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars import VariableManager

from test.mock.loader import DictDataLoader

@@ -36,7 +37,8 @@ class TestPlaybook(unittest.TestCase):
pass

def test_empty_playbook(self):
p = Playbook()
fake_loader = DictDataLoader({})
p = Playbook(loader=fake_loader)

def test_basic_playbook(self):
fake_loader = DictDataLoader({

@@ -61,6 +63,7 @@ class TestPlaybook(unittest.TestCase):

""",
})
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", fake_loader)
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
@@ -1,64 +0,0 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.compat.tests import unittest
from ansible.errors import AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleMapping
from ansible.playbook.task_include import TaskInclude

from test.mock.loader import DictDataLoader

class TestTaskInclude(unittest.TestCase):

    def setUp(self):
        self._fake_loader = DictDataLoader({
            "foo.yml": """
            - shell: echo "hello world"
            """
        })

        pass

    def tearDown(self):
        pass

    def test_empty_task_include(self):
        ti = TaskInclude()

    def test_basic_task_include(self):
        ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=self._fake_loader)
        tasks = ti.compile()

    def test_task_include_with_loop(self):
        ti = TaskInclude.load(AnsibleMapping(include='foo.yml', with_items=['a', 'b', 'c']), loader=self._fake_loader)

    def test_task_include_with_conditional(self):
        ti = TaskInclude.load(AnsibleMapping(include='foo.yml', when="1 == 1"), loader=self._fake_loader)

    def test_task_include_with_tags(self):
        ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags="foo"), loader=self._fake_loader)
        ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags=["foo", "bar"]), loader=self._fake_loader)

    def test_task_include_errors(self):
        self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include=''), loader=self._fake_loader)
        self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml', vars="1"), loader=self._fake_loader)
        self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml a=1', vars=dict(b=2)), loader=self._fake_loader)
@@ -35,8 +35,10 @@ class TestVariableManager(unittest.TestCase):
pass

def test_basic_manager(self):
fake_loader = DictDataLoader({})

v = VariableManager()
self.assertEqual(v.get_vars(), dict())
self.assertEqual(v.get_vars(loader=fake_loader), dict())

self.assertEqual(
v._merge_dicts(

@@ -52,23 +54,26 @@ class TestVariableManager(unittest.TestCase):
)

def test_manager_extra_vars(self):
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})

extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.set_extra_vars(extra_vars)

self.assertEqual(v.get_vars(), extra_vars)
self.assertIsNot(v.extra_vars, extra_vars)
for (key, val) in extra_vars.iteritems():
self.assertEqual(v.get_vars(loader=fake_loader).get(key), val)
self.assertIsNot(v.extra_vars.get(key), val)

def test_manager_host_vars_file(self):
def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
"""
})

v = VariableManager(loader=fake_loader)
v.add_host_vars_file("host_vars/hostname1.yml")
v = VariableManager()
v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar"))

@@ -77,37 +82,43 @@ class TestVariableManager(unittest.TestCase):
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()

self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar"))
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar")

def test_manager_group_vars_file(self):
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/somegroup.yml": """
foo: bar
"""
})

v = VariableManager(loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml")
v = VariableManager()
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar"))

mock_group = MagicMock()
mock_group.name.return_value = "somegroup"
mock_group.get_ancestors.return_value = ()

mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ["somegroup"]
mock_host.get_groups.return_value = (mock_group)

self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar"))
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar")

def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})

def test_manager_play_vars(self):
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []

v = VariableManager()
self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar"))
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar")

def test_manager_play_vars_files(self):
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar

@@ -119,13 +130,15 @@ class TestVariableManager(unittest.TestCase):
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']

v = VariableManager(loader=fake_loader)
self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar"))
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar")

def test_variable_manager_task_vars(self):
fake_loader = DictDataLoader({})

def test_manager_task_vars(self):
mock_task = MagicMock()
mock_task.get_vars.return_value = dict(foo="bar")

v = VariableManager()
self.assertEqual(v.get_vars(task=mock_task), dict(foo="bar"))
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task).get("foo"), "bar")