mirror of https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00

commit 1b54d3b6dc
Merge branch 'threaded_receiver' into devel

7 changed files with 461 additions and 389 deletions
@@ -55,12 +55,12 @@ class WorkerProcess(multiprocessing.Process):
     for reading later.
     '''
 
-    def __init__(self, rslt_q, play, host, task, task_vars, play_context, loader, variable_manager, shared_loader_obj):
+    def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):
 
         super(WorkerProcess, self).__init__()
         # takes a task queue manager as the sole param:
         self._rslt_q = rslt_q
-        self._play = play
+        self._task_vars = task_vars
         self._host = host
         self._task = task
         self._play_context = play_context
@@ -68,8 +68,6 @@ class WorkerProcess(multiprocessing.Process):
         self._variable_manager = variable_manager
         self._shared_loader_obj = shared_loader_obj
 
-        self._task_vars = task_vars
-
         # dupe stdin, if we have one
         self._new_stdin = sys.stdin
         try:
@@ -151,4 +149,3 @@ class WorkerProcess(multiprocessing.Process):
        #with open('worker_%06d.stats' % os.getpid(), 'w') as f:
        #    f.write(s.getvalue())
 
-        sys.exit(0)
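Note: the WorkerProcess changes above pass task_vars directly instead of the whole play, and drop the trailing sys.exit(0) from run(). Below is a minimal, runnable sketch of the general pattern (not ansible's actual WorkerProcess; the Worker class and its toy result dict are illustrative only): a multiprocessing.Process subclass that receives all per-task state at construction time and reports into a shared result queue.

# Minimal sketch of the worker pattern, with simplified stand-in data;
# ansible's real WorkerProcess also carries loaders, a play context, etc.
import multiprocessing

class Worker(multiprocessing.Process):
    def __init__(self, rslt_q, task_vars, host, task):
        super(Worker, self).__init__()
        # all per-task state is handed over at construction time, so the
        # child process needs no further coordination with the parent
        self._rslt_q = rslt_q
        self._task_vars = task_vars
        self._host = host
        self._task = task

    def run(self):
        # execute the task and push the result back to the parent; note
        # there is no sys.exit(0) here -- returning from run() lets
        # multiprocessing shut the child down cleanly
        result = {'host': self._host, 'task': self._task, 'nvars': len(self._task_vars)}
        self._rslt_q.put(result)

if __name__ == '__main__':
    q = multiprocessing.Queue()
    w = Worker(q, {'x': 1}, 'host01', 'ping')
    w.start()
    print(q.get())
    w.join()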
@@ -22,22 +22,16 @@ __metaclass__ = type
 import multiprocessing
 import os
 import tempfile
-import threading
-import time
 
-from collections import deque
-
 from ansible import constants as C
 from ansible.compat.six import string_types
 from ansible.errors import AnsibleError
-from ansible.executor import action_write_locks
 from ansible.executor.play_iterator import PlayIterator
-from ansible.executor.process.worker import WorkerProcess
 from ansible.executor.stats import AggregateStats
 from ansible.module_utils._text import to_text
 from ansible.playbook.block import Block
 from ansible.playbook.play_context import PlayContext
-from ansible.plugins import action_loader, callback_loader, connection_loader, filter_loader, lookup_loader, module_loader, strategy_loader, test_loader
+from ansible.plugins import callback_loader, strategy_loader, module_loader
 from ansible.plugins.callback import CallbackBase
 from ansible.template import Templar
 from ansible.utils.helpers import pct_to_int
@@ -52,23 +46,6 @@ except ImportError:
 __all__ = ['TaskQueueManager']
 
 
-# TODO: this should probably be in the plugins/__init__.py, with
-#       a smarter mechanism to set all of the attributes based on
-#       the loaders created there
-class SharedPluginLoaderObj:
-    '''
-    A simple object to make pass the various plugin loaders to
-    the forked processes over the queue easier
-    '''
-    def __init__(self):
-        self.action_loader = action_loader
-        self.connection_loader = connection_loader
-        self.filter_loader = filter_loader
-        self.test_loader = test_loader
-        self.lookup_loader = lookup_loader
-        self.module_loader = module_loader
-
-
 class TaskQueueManager:
 
     '''
@@ -100,8 +77,6 @@ class TaskQueueManager:
         self._run_additional_callbacks = run_additional_callbacks
         self._run_tree = run_tree
 
-        self._iterator = None
-
         self._callbacks_loaded = False
         self._callback_plugins = []
         self._start_at_done = False
@@ -123,86 +98,12 @@ class TaskQueueManager:
         self._failed_hosts = dict()
         self._unreachable_hosts = dict()
 
-        # the "queue" for the background thread to use
-        self._queued_tasks = deque()
-        self._queued_tasks_lock = threading.Lock()
-
-        # the background queuing thread
-        self._queue_thread = None
-
-        self._workers = []
         self._final_q = multiprocessing.Queue()
 
         # A temporary file (opened pre-fork) used by connection
         # plugins for inter-process locking.
         self._connection_lockfile = tempfile.TemporaryFile()
-
-    def _queue_thread_main(self):
-
-        # create a dummy object with plugin loaders set as an easier
-        # way to share them with the forked processes
-        shared_loader_obj = SharedPluginLoaderObj()
-
-        display.debug("queuing thread starting")
-        while not self._terminated:
-            available_workers = []
-            for idx, entry in enumerate(self._workers):
-                (worker_prc, _) = entry
-                if worker_prc is None or not worker_prc.is_alive():
-                    available_workers.append(idx)
-
-            if len(available_workers) == 0:
-                time.sleep(0.01)
-                continue
-
-            for worker_idx in available_workers:
-                try:
-                    self._queued_tasks_lock.acquire()
-                    (host, task, task_vars, play_context) = self._queued_tasks.pop()
-                except IndexError:
-                    break
-                finally:
-                    self._queued_tasks_lock.release()
-
-                if task.action not in action_write_locks.action_write_locks:
-                    display.debug('Creating lock for %s' % task.action)
-                    action_write_locks.action_write_locks[task.action] = multiprocessing.Lock()
-
-                try:
-                    worker_prc = WorkerProcess(
-                        self._final_q,
-                        self._iterator._play,
-                        host,
-                        task,
-                        task_vars,
-                        play_context,
-                        self._loader,
-                        self._variable_manager,
-                        shared_loader_obj,
-                    )
-                    self._workers[worker_idx][0] = worker_prc
-                    worker_prc.start()
-                    display.debug("worker is %d (out of %d available)" % (worker_idx+1, len(self._workers)))
-
-                except (EOFError, IOError, AssertionError) as e:
-                    # most likely an abort
-                    display.debug("got an error while queuing: %s" % e)
-                    break
-
-        display.debug("queuing thread exiting")
-
-    def queue_task(self, host, task, task_vars, play_context):
-        self._queued_tasks_lock.acquire()
-        self._queued_tasks.append((host, task, task_vars, play_context))
-        self._queued_tasks_lock.release()
-
-    def queue_multiple_tasks(self, items, play_context):
-        for item in items:
-            (host, task, task_vars) = item
-            self._queued_tasks_lock.acquire()
-            self._queued_tasks.append((host, task, task_vars, play_context))
-            self._queued_tasks_lock.release()
 
     def _initialize_processes(self, num):
         self._workers = []
 
@@ -307,10 +208,6 @@ class TaskQueueManager:
         if not self._callbacks_loaded:
            self.load_callbacks()
 
-        if self._queue_thread is None:
-            self._queue_thread = threading.Thread(target=self._queue_thread_main)
-            self._queue_thread.start()
-
         all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
         templar = Templar(loader=self._loader, variables=all_vars)
 
@@ -356,7 +253,7 @@ class TaskQueueManager:
             raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
 
         # build the iterator
-        self._iterator = PlayIterator(
+        iterator = PlayIterator(
             inventory=self._inventory,
             play=new_play,
             play_context=play_context,
@@ -371,7 +268,7 @@ class TaskQueueManager:
         # hosts so we know what failed this round.
         for host_name in self._failed_hosts.keys():
             host = self._inventory.get_host(host_name)
-            self._iterator.mark_host_failed(host)
+            iterator.mark_host_failed(host)
 
         self.clear_failed_hosts()
 
@@ -382,12 +279,13 @@ class TaskQueueManager:
             self._start_at_done = True
 
         # and run the play using the strategy and cleanup on way out
-        play_return = strategy.run(self._iterator, play_context)
+        play_return = strategy.run(iterator, play_context)
 
         # now re-save the hosts that failed from the iterator to our internal list
-        for host_name in self._iterator.get_failed_hosts():
+        for host_name in iterator.get_failed_hosts():
             self._failed_hosts[host_name] = True
 
+        strategy.cleanup()
         self._cleanup_processes()
         return play_return
 
@@ -398,6 +296,7 @@ class TaskQueueManager:
         self._cleanup_processes()
 
     def _cleanup_processes(self):
+        if hasattr(self, '_workers'):
             for (worker_prc, rslt_q) in self._workers:
                 rslt_q.close()
                 if worker_prc and worker_prc.is_alive():
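Note: after this change the TaskQueueManager no longer owns a background queuing thread; queueing moves into the strategy, and the TQM only asks the strategy to cleanup() before tearing down its worker processes. The removed code relied on a lock-guarded deque idiom that reappears in the new strategy code: pop inside try, treat an empty deque as "nothing to do", and release the lock in finally so an IndexError cannot leak the lock. A self-contained sketch of that idiom (names are illustrative):

import threading
from collections import deque

items = deque()
items_lock = threading.Lock()

def pop_next():
    try:
        items_lock.acquire()
        return items.pop()
    except IndexError:
        return None  # deque drained
    finally:
        # runs on both the normal and the IndexError path
        items_lock.release()

items.append('task-1')
print(pop_next())  # 'task-1'
print(pop_next())  # None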
@@ -25,6 +25,7 @@ from ansible.compat.six import text_type
 from ansible.errors import AnsibleError, AnsibleUndefinedVariable
 from ansible.playbook.attribute import FieldAttribute
 from ansible.template import Templar
+from ansible.module_utils._text import to_native
 
 class Conditional:
 
@@ -72,7 +73,7 @@ class Conditional:
                 if not self._check_conditional(conditional, templar, all_vars):
                     return False
         except Exception as e:
-            raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (conditional, e), obj=ds)
+            raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds)
 
         return True
 
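Note on the to_native() wrapping above: on Python 2, interpolating an exception with "%s" can itself raise a UnicodeError when the message contains non-ASCII text, because str(e) tries to encode it as ASCII. to_native() converts either bytes or unicode to the interpreter's native str type first. A small sketch of the failure mode it guards against:

# On py2, "%s" % e below would raise UnicodeEncodeError without to_native();
# on py3 both forms print fine.
from ansible.module_utils._text import to_native

try:
    raise ValueError(u'недопустимое условие')
except Exception as e:
    print("The conditional check failed. The error was: %s" % to_native(e))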
@@ -19,19 +19,28 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
+import os
+import threading
+import time
+
+from collections import deque
+from multiprocessing import Lock
 from jinja2.exceptions import UndefinedError
 
 from ansible.compat.six.moves import queue as Queue
 from ansible.compat.six import iteritems, string_types
 from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
+from ansible.executor import action_write_locks
+from ansible.executor.process.worker import WorkerProcess
 from ansible.executor.task_result import TaskResult
 from ansible.inventory.host import Host
 from ansible.inventory.group import Group
+from ansible.module_utils.facts import Facts
 from ansible.playbook.helpers import load_list_of_blocks
 from ansible.playbook.included_file import IncludedFile
 from ansible.playbook.task_include import TaskInclude
 from ansible.playbook.role_include import IncludeRole
-from ansible.plugins import action_loader
+from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
 from ansible.template import Templar
 from ansible.vars import combine_vars, strip_internal_keys
 from ansible.module_utils._text import to_text
@@ -45,6 +54,41 @@ except ImportError:
 
 __all__ = ['StrategyBase']
 
+# TODO: this should probably be in the plugins/__init__.py, with
+#       a smarter mechanism to set all of the attributes based on
+#       the loaders created there
+class SharedPluginLoaderObj:
+    '''
+    A simple object to make pass the various plugin loaders to
+    the forked processes over the queue easier
+    '''
+    def __init__(self):
+        self.action_loader = action_loader
+        self.connection_loader = connection_loader
+        self.filter_loader = filter_loader
+        self.test_loader = test_loader
+        self.lookup_loader = lookup_loader
+        self.module_loader = module_loader
+
+
+_sentinel = object()
+def results_thread_main(strategy):
+    #print("RESULT THREAD STARTING: %s" % threading.current_thread())
+    while True:
+        try:
+            result = strategy._final_q.get()
+            if type(result) == object:
+                break
+            else:
+                #print("result in thread is: %s" % result._result)
+                strategy._results_lock.acquire()
+                strategy._results.append(result)
+                strategy._results_lock.release()
+        except (IOError, EOFError):
+            break
+        except Queue.Empty:
+            pass
+    #print("RESULT THREAD EXITED: %s" % threading.current_thread())
 
 class StrategyBase:
 
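Note on the sentinel check above: _sentinel is a bare object(), and a bare object() is the only kind of value whose type is exactly `object`, so `type(result) == object` is a cheap "is this the shutdown marker?" test. (It would also match any other bare object() pushed through the queue, which is why a single dedicated _sentinel is defined.) A two-line demonstration:

_sentinel = object()

class TaskResult(object):
    pass

print(type(_sentinel) == object)     # True  -> receiver thread breaks out
print(type(TaskResult()) == object)  # False -> normal result, appended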
@ -56,6 +100,7 @@ class StrategyBase:
|
||||||
def __init__(self, tqm):
|
def __init__(self, tqm):
|
||||||
self._tqm = tqm
|
self._tqm = tqm
|
||||||
self._inventory = tqm.get_inventory()
|
self._inventory = tqm.get_inventory()
|
||||||
|
self._workers = tqm.get_workers()
|
||||||
self._notified_handlers = tqm._notified_handlers
|
self._notified_handlers = tqm._notified_handlers
|
||||||
self._listening_handlers = tqm._listening_handlers
|
self._listening_handlers = tqm._listening_handlers
|
||||||
self._variable_manager = tqm.get_variable_manager()
|
self._variable_manager = tqm.get_variable_manager()
|
||||||
|
@ -63,16 +108,30 @@ class StrategyBase:
|
||||||
self._final_q = tqm._final_q
|
self._final_q = tqm._final_q
|
||||||
self._step = getattr(tqm._options, 'step', False)
|
self._step = getattr(tqm._options, 'step', False)
|
||||||
self._diff = getattr(tqm._options, 'diff', False)
|
self._diff = getattr(tqm._options, 'diff', False)
|
||||||
|
|
||||||
# Backwards compat: self._display isn't really needed, just import the global display and use that.
|
# Backwards compat: self._display isn't really needed, just import the global display and use that.
|
||||||
self._display = display
|
self._display = display
|
||||||
|
|
||||||
# internal counters
|
# internal counters
|
||||||
self._pending_results = 0
|
self._pending_results = 0
|
||||||
|
self._cur_worker = 0
|
||||||
|
|
||||||
# this dictionary is used to keep track of hosts that have
|
# this dictionary is used to keep track of hosts that have
|
||||||
# outstanding tasks still in queue
|
# outstanding tasks still in queue
|
||||||
self._blocked_hosts = dict()
|
self._blocked_hosts = dict()
|
||||||
|
|
||||||
|
self._results = deque()
|
||||||
|
self._results_lock = threading.Condition(threading.Lock())
|
||||||
|
|
||||||
|
#print("creating thread for strategy %s" % id(self))
|
||||||
|
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
|
||||||
|
self._results_thread.daemon = True
|
||||||
|
self._results_thread.start()
|
||||||
|
|
||||||
|
def cleanup(self):
|
||||||
|
self._final_q.put(_sentinel)
|
||||||
|
self._results_thread.join()
|
||||||
|
|
||||||
def run(self, iterator, play_context, result=0):
|
def run(self, iterator, play_context, result=0):
|
||||||
# save the failed/unreachable hosts, as the run_handlers()
|
# save the failed/unreachable hosts, as the run_handlers()
|
||||||
# method will clear that information during its execution
|
# method will clear that information during its execution
|
||||||
|
@ -118,10 +177,57 @@ class StrategyBase:
|
||||||
|
|
||||||
def _queue_task(self, host, task, task_vars, play_context):
|
def _queue_task(self, host, task, task_vars, play_context):
|
||||||
''' handles queueing the task up to be sent to a worker '''
|
''' handles queueing the task up to be sent to a worker '''
|
||||||
self._tqm.queue_task(host, task, task_vars, play_context)
|
|
||||||
self._pending_results += 1
|
|
||||||
|
|
||||||
def _process_pending_results(self, iterator, one_pass=False, timeout=0.001):
|
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
|
||||||
|
|
||||||
|
# Add a write lock for tasks.
|
||||||
|
# Maybe this should be added somewhere further up the call stack but
|
||||||
|
# this is the earliest in the code where we have task (1) extracted
|
||||||
|
# into its own variable and (2) there's only a single code path
|
||||||
|
# leading to the module being run. This is called by three
|
||||||
|
# functions: __init__.py::_do_handler_run(), linear.py::run(), and
|
||||||
|
# free.py::run() so we'd have to add to all three to do it there.
|
||||||
|
# The next common higher level is __init__.py::run() and that has
|
||||||
|
# tasks inside of play_iterator so we'd have to extract them to do it
|
||||||
|
# there.
|
||||||
|
|
||||||
|
if task.action not in action_write_locks.action_write_locks:
|
||||||
|
display.debug('Creating lock for %s' % task.action)
|
||||||
|
action_write_locks.action_write_locks[task.action] = Lock()
|
||||||
|
|
||||||
|
# and then queue the new task
|
||||||
|
try:
|
||||||
|
|
||||||
|
# create a dummy object with plugin loaders set as an easier
|
||||||
|
# way to share them with the forked processes
|
||||||
|
shared_loader_obj = SharedPluginLoaderObj()
|
||||||
|
|
||||||
|
queued = False
|
||||||
|
starting_worker = self._cur_worker
|
||||||
|
while True:
|
||||||
|
(worker_prc, rslt_q) = self._workers[self._cur_worker]
|
||||||
|
if worker_prc is None or not worker_prc.is_alive():
|
||||||
|
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
|
||||||
|
self._workers[self._cur_worker][0] = worker_prc
|
||||||
|
worker_prc.start()
|
||||||
|
display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
|
||||||
|
queued = True
|
||||||
|
self._cur_worker += 1
|
||||||
|
if self._cur_worker >= len(self._workers):
|
||||||
|
self._cur_worker = 0
|
||||||
|
if queued:
|
||||||
|
break
|
||||||
|
elif self._cur_worker == starting_worker:
|
||||||
|
time.sleep(0.0001)
|
||||||
|
|
||||||
|
self._pending_results += 1
|
||||||
|
except (EOFError, IOError, AssertionError) as e:
|
||||||
|
# most likely an abort
|
||||||
|
display.debug("got an error while queuing: %s" % e)
|
||||||
|
return
|
||||||
|
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
|
||||||
|
|
||||||
|
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
|
||||||
'''
|
'''
|
||||||
Reads results off the final queue and takes appropriate action
|
Reads results off the final queue and takes appropriate action
|
||||||
based on the result (executing callbacks, updating state, etc.).
|
based on the result (executing callbacks, updating state, etc.).
|
||||||
|
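Note: the new _queue_task above does its own worker placement with a round-robin cursor. A stand-alone sketch of that scan (slot contents here are plain booleans standing in for "worker process is alive"; the real code also restarts dead workers and rechecks is_alive()):

import time

workers = [True, True, False]   # True = busy slot, False = free
cur_worker = 0                  # persistent cursor, like self._cur_worker

def queue_one():
    global cur_worker
    queued = False
    starting_worker = cur_worker
    while True:
        if not workers[cur_worker]:
            workers[cur_worker] = True   # claim the free slot for a new worker
            queued = True
        cur_worker = (cur_worker + 1) % len(workers)
        if queued:
            break
        elif cur_worker == starting_worker:
            # completed a full lap with every slot busy: back off briefly,
            # then rescan until some worker finishes
            time.sleep(0.0001)

queue_one()
print(workers, cur_worker)  # [True, True, True] 0 -- slot 2 claimed, cursor wrapped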
@@ -180,10 +286,16 @@ class StrategyBase:
             else:
                 return False
 
-        passes = 1
-        while not self._tqm._terminated and passes < 3:
+        cur_pass = 0
+        while True:
             try:
-                task_result = self._final_q.get(timeout=timeout)
+                self._results_lock.acquire()
+                task_result = self._results.pop()
+            except IndexError:
+                break
+            finally:
+                self._results_lock.release()
+
             original_host = get_original_host(task_result._host)
             original_task = iterator.get_original_task(original_host, task_result._task)
             task_result._host = original_host
@@ -206,6 +318,7 @@ class StrategyBase:
                 continue
 
             if original_task.register:
+                #print("^ REGISTERING RESULT %s" % original_task.register)
                 if original_task.run_once:
                     host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                 else:
@@ -394,12 +507,11 @@ class StrategyBase:
 
             ret_results.append(task_result)
 
-        except Queue.Empty:
-            passes += 1
-
-            if one_pass:
-                break
+            if one_pass or max_passes is not None and (cur_pass+1) >= max_passes:
+                break
+
+            cur_pass += 1
 
         return ret_results
 
     def _wait_on_pending_results(self, iterator):
@@ -411,18 +523,14 @@ class StrategyBase:
         ret_results = []
 
         display.debug("waiting for pending results...")
-        dead_check = 10
         while self._pending_results > 0 and not self._tqm._terminated:
+
+            if self._tqm.has_dead_workers():
+                raise AnsibleError("A worker was found in a dead state")
+
             results = self._process_pending_results(iterator)
             ret_results.extend(results)
 
-            dead_check -= 1
-            if dead_check == 0:
-                if self._pending_results > 0 and self._tqm.has_dead_workers():
-                    raise AnsibleError("A worker was found in a dead state")
-                dead_check = 10
-
         display.debug("no more pending results, returning what we have")
 
         return ret_results
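Note: results now come off a local deque filled by the receiver thread, so _process_pending_results never blocks on a queue timeout; that is why _wait_on_pending_results can afford to probe has_dead_workers() on every iteration instead of every tenth (the old dead_check counter). The one_pass/max_passes bound lets callers interleave queueing with result processing. A self-contained sketch of the loop shape (names are illustrative):

from collections import deque

def process_pending(results, one_pass=False, max_passes=None):
    drained = []
    cur_pass = 0
    while True:
        try:
            drained.append(results.pop())
        except IndexError:
            break  # local buffer empty: nothing more this call
        # `and` binds tighter than `or`, matching the committed condition
        if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
            break
        cur_pass += 1
    return drained

buf = deque(['r1', 'r2', 'r3', 'r4'])
print(process_pending(buf, max_passes=2))   # ['r4', 'r3'] -- at most 2 per call
print(process_pending(buf, one_pass=True))  # ['r2']       -- at most 1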
@@ -182,9 +182,7 @@ class StrategyModule(StrategyBase):
             any_errors_fatal = False
 
             results = []
-            items_to_queue = []
             for (host, task) in host_tasks:
-
                 if not task:
                     continue
 
@@ -259,26 +257,21 @@ class StrategyModule(StrategyBase):
                    display.debug("sending task start callback")
 
                    self._blocked_hosts[host.get_name()] = True
-                    items_to_queue.append((host, task, task_vars))
-                    self._pending_results += 1
+                    self._queue_task(host, task, task_vars, play_context)
                    del task_vars
 
                # if we're bypassing the host loop, break out now
                if run_once:
                    break
 
-                # FIXME: probably not required here any more with the result proc
-                # having been removed, so there's no only a single result
-                # queue for the main thread
-                results += self._process_pending_results(iterator, one_pass=True)
-
-            self._tqm.queue_multiple_tasks(items_to_queue, play_context)
-
+                results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))
 
            # go to next host/task group
            if skip_rest:
                continue
 
            display.debug("done queuing things up, now waiting for results queue to drain")
-            results += self._wait_on_pending_results(iterator)
+            if self._pending_results > 0:
+                results += self._wait_on_pending_results(iterator)
            host_results.extend(results)
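Note on the max_passes bound above: the linear strategy drains roughly 10% of the worker-slot count worth of results per scheduling pass, but always at least one, so the queueing loop stays responsive without starving result processing. Worked out for a few fork counts:

for forks in (5, 20, 100):
    print(forks, max(1, int(forks * 0.1)))
# 5 -> 1, 20 -> 2, 100 -> 10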
@@ -45,16 +45,49 @@ class TestStrategyBase(unittest.TestCase):
         pass
 
     def test_strategy_base_init(self):
+        queue_items = []
+        def _queue_empty(*args, **kwargs):
+            return len(queue_items) == 0
+        def _queue_get(*args, **kwargs):
+            if len(queue_items) == 0:
+                raise Queue.Empty
+            else:
+                return queue_items.pop()
+        def _queue_put(item, *args, **kwargs):
+            queue_items.append(item)
+
+        mock_queue = MagicMock()
+        mock_queue.empty.side_effect = _queue_empty
+        mock_queue.get.side_effect = _queue_get
+        mock_queue.put.side_effect = _queue_put
+
         mock_tqm = MagicMock(TaskQueueManager)
-        mock_tqm._final_q = MagicMock()
+        mock_tqm._final_q = mock_queue
         mock_tqm._options = MagicMock()
         mock_tqm._notified_handlers = {}
         mock_tqm._listening_handlers = {}
         strategy_base = StrategyBase(tqm=mock_tqm)
+        strategy_base.cleanup()
 
     def test_strategy_base_run(self):
+        queue_items = []
+        def _queue_empty(*args, **kwargs):
+            return len(queue_items) == 0
+        def _queue_get(*args, **kwargs):
+            if len(queue_items) == 0:
+                raise Queue.Empty
+            else:
+                return queue_items.pop()
+        def _queue_put(item, *args, **kwargs):
+            queue_items.append(item)
+
+        mock_queue = MagicMock()
+        mock_queue.empty.side_effect = _queue_empty
+        mock_queue.get.side_effect = _queue_get
+        mock_queue.put.side_effect = _queue_put
+
         mock_tqm = MagicMock(TaskQueueManager)
-        mock_tqm._final_q = MagicMock()
+        mock_tqm._final_q = mock_queue
         mock_tqm._stats = MagicMock()
         mock_tqm._notified_handlers = {}
         mock_tqm._listening_handlers = {}
@@ -87,8 +120,25 @@ class TestStrategyBase(unittest.TestCase):
         mock_tqm._unreachable_hosts = dict(host1=True)
         mock_iterator.get_failed_hosts.return_value = []
         self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_UNREACHABLE_HOSTS)
+        strategy_base.cleanup()
 
     def test_strategy_base_get_hosts(self):
+        queue_items = []
+        def _queue_empty(*args, **kwargs):
+            return len(queue_items) == 0
+        def _queue_get(*args, **kwargs):
+            if len(queue_items) == 0:
+                raise Queue.Empty
+            else:
+                return queue_items.pop()
+        def _queue_put(item, *args, **kwargs):
+            queue_items.append(item)
+
+        mock_queue = MagicMock()
+        mock_queue.empty.side_effect = _queue_empty
+        mock_queue.get.side_effect = _queue_get
+        mock_queue.put.side_effect = _queue_put
+
         mock_hosts = []
         for i in range(0, 5):
             mock_host = MagicMock()
@@ -100,7 +150,7 @@ class TestStrategyBase(unittest.TestCase):
         mock_inventory.get_hosts.return_value = mock_hosts
 
         mock_tqm = MagicMock()
-        mock_tqm._final_q = MagicMock()
+        mock_tqm._final_q = mock_queue
         mock_tqm._notified_handlers = {}
         mock_tqm._listening_handlers = {}
         mock_tqm.get_inventory.return_value = mock_inventory
@@ -120,45 +170,47 @@ class TestStrategyBase(unittest.TestCase):
 
         mock_tqm._unreachable_hosts = ["host02"]
         self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:])
+        strategy_base.cleanup()
 
-    #@patch.object(WorkerProcess, 'run')
-    #def test_strategy_base_queue_task(self, mock_worker):
-    #    def fake_run(self):
-    #        return
-
-    #    mock_worker.run.side_effect = fake_run
-
-    #    fake_loader = DictDataLoader()
-    #    mock_var_manager = MagicMock()
-    #    mock_host = MagicMock()
-    #    mock_host.has_hostkey = True
-    #    mock_inventory = MagicMock()
-    #    mock_options = MagicMock()
-    #    mock_options.module_path = None
-
-    #    tqm = TaskQueueManager(
-    #        inventory=mock_inventory,
-    #        variable_manager=mock_var_manager,
-    #        loader=fake_loader,
-    #        options=mock_options,
-    #        passwords=None,
-    #    )
-    #    tqm._initialize_processes(3)
-    #    tqm.hostvars = dict()
-
-    #    try:
-    #        strategy_base = StrategyBase(tqm=tqm)
-    #        strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
-    #        self.assertEqual(strategy_base._cur_worker, 1)
-    #        self.assertEqual(strategy_base._pending_results, 1)
-    #        strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
-    #        self.assertEqual(strategy_base._cur_worker, 2)
-    #        self.assertEqual(strategy_base._pending_results, 2)
-    #        strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
-    #        self.assertEqual(strategy_base._cur_worker, 0)
-    #        self.assertEqual(strategy_base._pending_results, 3)
-    #    finally:
-    #        tqm.cleanup()
+    @patch.object(WorkerProcess, 'run')
+    def test_strategy_base_queue_task(self, mock_worker):
+        def fake_run(self):
+            return
+
+        mock_worker.run.side_effect = fake_run
+
+        fake_loader = DictDataLoader()
+        mock_var_manager = MagicMock()
+        mock_host = MagicMock()
+        mock_host.has_hostkey = True
+        mock_inventory = MagicMock()
+        mock_options = MagicMock()
+        mock_options.module_path = None
+
+        tqm = TaskQueueManager(
+            inventory=mock_inventory,
+            variable_manager=mock_var_manager,
+            loader=fake_loader,
+            options=mock_options,
+            passwords=None,
+        )
+        tqm._initialize_processes(3)
+        tqm.hostvars = dict()
+
+        try:
+            strategy_base = StrategyBase(tqm=tqm)
+            strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+            self.assertEqual(strategy_base._cur_worker, 1)
+            self.assertEqual(strategy_base._pending_results, 1)
+            strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+            self.assertEqual(strategy_base._cur_worker, 2)
+            self.assertEqual(strategy_base._pending_results, 2)
+            strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+            self.assertEqual(strategy_base._cur_worker, 0)
+            self.assertEqual(strategy_base._pending_results, 3)
+        finally:
+            tqm.cleanup()
 
     def test_strategy_base_process_pending_results(self):
         mock_tqm = MagicMock()
@@ -177,10 +229,13 @@ class TestStrategyBase(unittest.TestCase):
                 raise Queue.Empty
             else:
                 return queue_items.pop()
+        def _queue_put(item, *args, **kwargs):
+            queue_items.append(item)
+
         mock_queue = MagicMock()
         mock_queue.empty.side_effect = _queue_empty
         mock_queue.get.side_effect = _queue_get
+        mock_queue.put.side_effect = _queue_put
         mock_tqm._final_q = mock_queue
 
         mock_tqm._stats = MagicMock()
@@ -271,7 +326,7 @@ class TestStrategyBase(unittest.TestCase):
         strategy_base._blocked_hosts['test01'] = True
         strategy_base._pending_results = 1
         mock_iterator.is_failed.return_value = True
-        results = strategy_base._process_pending_results(iterator=mock_iterator)
+        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
         self.assertEqual(len(results), 1)
         self.assertEqual(results[0], task_result)
         self.assertEqual(strategy_base._pending_results, 0)
@@ -305,7 +360,7 @@ class TestStrategyBase(unittest.TestCase):
         queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
         strategy_base._blocked_hosts['test01'] = True
         strategy_base._pending_results = 1
-        results = strategy_base._process_pending_results(iterator=mock_iterator)
+        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
         self.assertEqual(len(results), 1)
         self.assertEqual(strategy_base._pending_results, 0)
         self.assertNotIn('test01', strategy_base._blocked_hosts)
@@ -313,7 +368,7 @@ class TestStrategyBase(unittest.TestCase):
         queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_group=dict(group_name='foo'))))
         strategy_base._blocked_hosts['test01'] = True
         strategy_base._pending_results = 1
-        results = strategy_base._process_pending_results(iterator=mock_iterator)
+        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
         self.assertEqual(len(results), 1)
         self.assertEqual(strategy_base._pending_results, 0)
         self.assertNotIn('test01', strategy_base._blocked_hosts)
@@ -321,7 +376,7 @@ class TestStrategyBase(unittest.TestCase):
         queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True, _ansible_notify=['test handler'])))
         strategy_base._blocked_hosts['test01'] = True
         strategy_base._pending_results = 1
-        results = strategy_base._process_pending_results(iterator=mock_iterator)
+        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
         self.assertEqual(len(results), 1)
         self.assertEqual(strategy_base._pending_results, 0)
         self.assertNotIn('test01', strategy_base._blocked_hosts)
@@ -340,6 +395,7 @@ class TestStrategyBase(unittest.TestCase):
 
         #queue_items.append(('bad'))
         #self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
+        strategy_base.cleanup()
 
     def test_strategy_base_load_included_file(self):
         fake_loader = DictDataLoader({
@@ -350,13 +406,30 @@ class TestStrategyBase(unittest.TestCase):
             """,
         })
 
+        queue_items = []
+        def _queue_empty(*args, **kwargs):
+            return len(queue_items) == 0
+        def _queue_get(*args, **kwargs):
+            if len(queue_items) == 0:
+                raise Queue.Empty
+            else:
+                return queue_items.pop()
+        def _queue_put(item, *args, **kwargs):
+            queue_items.append(item)
+
+        mock_queue = MagicMock()
+        mock_queue.empty.side_effect = _queue_empty
+        mock_queue.get.side_effect = _queue_get
+        mock_queue.put.side_effect = _queue_put
+
         mock_tqm = MagicMock()
-        mock_tqm._final_q = MagicMock()
+        mock_tqm._final_q = mock_queue
         mock_tqm._notified_handlers = {}
         mock_tqm._listening_handlers = {}
 
         strategy_base = StrategyBase(tqm=mock_tqm)
         strategy_base._loader = fake_loader
+        strategy_base.cleanup()
 
         mock_play = MagicMock()
 
@@ -442,4 +515,5 @@ class TestStrategyBase(unittest.TestCase):
 
             result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
         finally:
+            strategy_base.cleanup()
             tqm.cleanup()
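Note: every test that builds a StrategyBase now calls cleanup(), because the constructor starts a daemonized results thread that blocks in _final_q.get(); cleanup() pushes the sentinel through so the thread can exit and join. That also means the mocked queue needs a working put(), hence the _queue_put side_effect added throughout. A stand-alone sketch of the list-backed fake queue those tests rely on:

# A plain list plays the role of the queue; MagicMock routes get/put to it.
try:
    from unittest.mock import MagicMock   # py3
except ImportError:
    from mock import MagicMock            # py2, as in ansible.compat.tests.mock

queue_items = []

mock_queue = MagicMock()
mock_queue.empty.side_effect = lambda *a, **kw: len(queue_items) == 0
mock_queue.get.side_effect = lambda *a, **kw: queue_items.pop()
mock_queue.put.side_effect = lambda item, *a, **kw: queue_items.append(item)

mock_queue.put('task-result')
print(mock_queue.get())    # 'task-result' -- what put() stored, get() returns
print(mock_queue.empty())  # True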
@@ -25,7 +25,7 @@ from ansible.compat.tests.mock import patch, MagicMock
 from ansible import constants as C
 from ansible.errors import *
 from ansible.plugins import filter_loader, lookup_loader, module_loader
-from ansible.executor.task_queue_manager import SharedPluginLoaderObj
+from ansible.plugins.strategy import SharedPluginLoaderObj
 from ansible.template import Templar
 
 from units.mock.loader import DictDataLoader