mirror of https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00

Merge pull request #11708 from bcoca/display_me
adding display to plugins and start moving debug to display

Commit 4e3f5e3be6
20 changed files with 152 additions and 109 deletions
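The change repeated across the files below is the same in each module: a plugin tries to reuse the Display object created by the CLI entry point (available as __main__.display), falls back to constructing its own, and then routes its former debug() calls through it. A minimal sketch of that pattern, assembled only from lines that appear in this diff:

    try:
        from __main__ import display
    except ImportError:
        from ansible.utils.display import Display
        display = Display()

    # classes then keep a reference and log through it, e.g.
    # self._display = display
    # self._display.debug("RUNNING CLEANUP")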

@@ -34,8 +34,6 @@ from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader
from ansible.template import Templar
from ansible.utils.debug import debug
__all__ = ['TaskQueueManager']
class TaskQueueManager:

@@ -194,7 +192,7 @@ class TaskQueueManager:
return strategy.run(iterator, play_context)
def cleanup(self):
debug("RUNNING CLEANUP")
self._display.debug("RUNNING CLEANUP")
self.terminate()

@@ -28,10 +28,15 @@ import os.path
import sys
from ansible import constants as C
from ansible.utils.display import Display
from ansible.utils.unicode import to_unicode
from ansible import errors
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
MODULE_CACHE = {}
PATH_CACHE = {}
PLUGIN_PATH_CACHE = {}

@@ -225,8 +230,7 @@ class PluginLoader:
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
except OSError as e:
d = Display()
d.warning("Error accessing plugin paths: %s" % str(e))
display.warning("Error accessing plugin paths: %s" % str(e))
for full_path in (f for f in full_paths if os.path.isfile(f)):
for suffix in suffixes:
if full_path.endswith(suffix):

@@ -249,8 +253,7 @@ class PluginLoader:
# We've already cached all the paths at this point
if alias_name in self._plugin_path_cache:
if not os.path.islink(self._plugin_path_cache[alias_name]):
d = Display()
d.deprecated('%s is kept for backwards compatibility '
display.deprecated('%s is kept for backwards compatibility '
'but usage is discouraged. The module '
'documentation details page may explain '
'more about this rationale.' %

@@ -31,9 +31,14 @@ from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.debug import debug
from ansible.utils.unicode import to_bytes
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class ActionBase:
'''

@@ -50,6 +55,7 @@ class ActionBase:
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
self._display = display
self._supports_check_mode = True

@@ -142,9 +148,9 @@ class ActionBase:
tmp_mode = 'a+rx'
cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
debug("executing _low_level_execute_command to create the tmp path")
self._display.debug("executing _low_level_execute_command to create the tmp path")
result = self._low_level_execute_command(cmd, None, sudoable=False)
debug("done with creation of tmp path")
self._display.debug("done with creation of tmp path")
# error handling on this seems a little aggressive?
if result['rc'] != 0:

@@ -183,9 +189,9 @@ class ActionBase:
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
debug("calling _low_level_execute_command to remove the tmp path")
self._display.debug("calling _low_level_execute_command to remove the tmp path")
self._low_level_execute_command(cmd, None, sudoable=False)
debug("done removing the tmp path")
self._display.debug("done removing the tmp path")
def _transfer_data(self, remote_path, data):
'''

@@ -220,9 +226,9 @@ class ActionBase:
'''
cmd = self._connection._shell.chmod(mode, path)
debug("calling _low_level_execute_command to chmod the remote path")
self._display.debug("calling _low_level_execute_command to chmod the remote path")
res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable)
debug("done with chmod call")
self._display.debug("done with chmod call")
return res
def _remote_checksum(self, tmp, path):

@@ -235,9 +241,9 @@ class ActionBase:
#python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python')
python_interp = 'python'
cmd = self._connection._shell.checksum(path, python_interp)
debug("calling _low_level_execute_command to get the remote checksum")
self._display.debug("calling _low_level_execute_command to get the remote checksum")
data = self._low_level_execute_command(cmd, tmp, sudoable=True)
debug("done getting the remote checksum")
self._display.debug("done getting the remote checksum")
# FIXME: implement this function?
#data2 = utils.last_non_blank_line(data['stdout'])
try:

@@ -271,9 +277,9 @@ class ActionBase:
expand_path = '~%s' % self._play_context.become_user
cmd = self._connection._shell.expand_user(expand_path)
debug("calling _low_level_execute_command to expand the remote user path")
self._display.debug("calling _low_level_execute_command to expand the remote user path")
data = self._low_level_execute_command(cmd, tmp, sudoable=False)
debug("done expanding the remote user path")
self._display.debug("done expanding the remote user path")
#initial_fragment = utils.last_non_blank_line(data['stdout'])
initial_fragment = data['stdout'].strip().splitlines()[-1]

@@ -326,7 +332,7 @@ class ActionBase:
if self._play_context.no_log:
module_args['_ansible_no_log'] = True
debug("in _execute_module (%s, %s)" % (module_name, module_args))
self._display.debug("in _execute_module (%s, %s)" % (module_name, module_args))
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
if not shebang:

@@ -341,9 +347,9 @@ class ActionBase:
# FIXME: async stuff here?
#if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES):
if remote_module_path:
debug("transferring module to remote")
self._display.debug("transferring module to remote")
self._transfer_data(remote_module_path, module_data)
debug("done transferring module to remote")
self._display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()

@@ -378,9 +384,9 @@ class ActionBase:
# specified in the play, not the sudo_user
sudoable = False
debug("calling _low_level_execute_command() for command %s" % cmd)
self._display.debug("calling _low_level_execute_command() for command %s" % cmd)
res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable, in_data=in_data)
debug("_low_level_execute_command returned ok")
self._display.debug("_low_level_execute_command returned ok")
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if self._play_context.become and self._play_context.become_user != 'root':

@@ -413,7 +419,7 @@ class ActionBase:
module_name = module_name,
)
debug("done with _execute_module (%s, %s)" % (module_name, module_args))
self._display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None, executable=None):

@@ -426,18 +432,18 @@ class ActionBase:
if executable is not None:
cmd = executable + ' -c ' + cmd
debug("in _low_level_execute_command() (%s)" % (cmd,))
self._display.debug("in _low_level_execute_command() (%s)" % (cmd,))
if not cmd:
# this can happen with powershell modules when there is no analog to a Windows command (like chmod)
debug("no command, exiting _low_level_execute_command()")
self._display.debug("no command, exiting _low_level_execute_command()")
return dict(stdout='', stderr='')
if sudoable:
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
debug("executing the command %s through the connection" % cmd)
self._display.debug("executing the command %s through the connection" % cmd)
rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, in_data=in_data, sudoable=sudoable)
debug("command execution done")
self._display.debug("command execution done")
if not isinstance(stdout, basestring):
out = ''.join(stdout.readlines())

@@ -449,7 +455,7 @@ class ActionBase:
else:
err = stderr
debug("done with _low_level_execute_command() (%s)" % (cmd,))
self._display.debug("done with _low_level_execute_command() (%s)" % (cmd,))
if rc is None:
rc = 0

@@ -457,7 +463,7 @@ class ActionBase:
def _get_first_available_file(self, faf, of=None, searchdir='files'):
self._connection._display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
self._display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
for fn in faf:
fn_orig = fn
fnt = self._templar.template(fn)

lib/ansible/plugins/cache/__init__.py (vendored, 10 changed lines)

@@ -22,12 +22,20 @@ from collections import MutableMapping
from ansible import constants as C
from ansible.plugins import cache_loader
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class FactCache(MutableMapping):
def __init__(self, *args, **kwargs):
self._plugin = cache_loader.get(C.CACHE_PLUGIN)
self._display = display
if self._plugin is None:
# FIXME: this should be an exception
self._display.warning("Failed to load fact cache plugins")
return
def __getitem__(self, key):

lib/ansible/plugins/cache/base.py (vendored, 8 changed lines)

@@ -22,9 +22,17 @@ from abc import ABCMeta, abstractmethod
from six import with_metaclass
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class BaseCacheModule(with_metaclass(ABCMeta, object)):
display = display
@abstractmethod
def get(self, key):
pass

lib/ansible/plugins/cache/jsonfile.py (vendored, 18 changed lines)

@@ -46,8 +46,7 @@ class CacheModule(BaseCacheModule):
try:
os.makedirs(self._cache_dir)
except (OSError,IOError), e:
# FIXME: this is in display now, but cache plugins don't have that
#utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
self._display.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
return None
def get(self, key):

@@ -62,8 +61,7 @@ class CacheModule(BaseCacheModule):
try:
f = codecs.open(cachefile, 'r', encoding='utf-8')
except (OSError,IOError), e:
# FIXME: this is in display now, but cache plugins don't have that
#utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
self._display.warning("error while trying to read %s : %s" % (cachefile, str(e)))
pass
else:
try:

@@ -71,8 +69,7 @@ class CacheModule(BaseCacheModule):
self._cache[key] = value
return value
except ValueError:
# FIXME: this is in display now, but cache plugins don't have that
#utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
self._display.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
raise KeyError
finally:
f.close()

@@ -85,8 +82,7 @@ class CacheModule(BaseCacheModule):
try:
f = codecs.open(cachefile, 'w', encoding='utf-8')
except (OSError,IOError), e:
# FIXME: this is in display now, but cache plugins don't have that
#utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
self._display.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
pass
else:
f.write(jsonify(value))

@@ -102,8 +98,7 @@ class CacheModule(BaseCacheModule):
if e.errno == errno.ENOENT:
return False
else:
# FIXME: this is in display now, but cache plugins don't have that
#utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
pass
if time.time() - st.st_mtime <= self._timeout:

@@ -135,8 +130,7 @@ class CacheModule(BaseCacheModule):
if e.errno == errno.ENOENT:
return False
else:
# FIXME: this is in display now, but cache plugins don't have that
#utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
pass
def delete(self, key):

lib/ansible/plugins/cache/redis.py (vendored, 3 changed lines)

@@ -28,8 +28,7 @@ from ansible.plugins.cache.base import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
print("The 'redis' python module is required, 'pip install redis'")
sys.exit(1)
raise AnsibleError("The 'redis' python module is required for the redis fact cache, 'pip install redis'")
class CacheModule(BaseCacheModule):
"""

@@ -33,11 +33,11 @@ from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins import shell_loader
# FIXME: this object should be created upfront and passed through
# the entire chain of calls to here, as there are other things
# which may want to output display/logs too
from ansible.utils.display import Display
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
__all__ = ['ConnectionBase', 'ensure_connect']

@@ -65,7 +65,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
if not hasattr(self, '_new_stdin'):
self._new_stdin = new_stdin
if not hasattr(self, '_display'):
self._display = Display(verbosity=play_context.verbosity)
self._display = display
if not hasattr(self, '_connected'):
self._connected = False

@@ -30,8 +30,6 @@ import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
from ansible.utils.debug import debug
class Connection(ConnectionBase):
''' Local based connections '''

@@ -53,7 +51,7 @@ class Connection(ConnectionBase):
super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)
debug("in local.exec_command()")
self._display.debug("in local.exec_command()")
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

@@ -61,7 +59,7 @@ class Connection(ConnectionBase):
self._display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
# FIXME: cwd= needs to be set to the basedir of the playbook
debug("opening command with Popen()")
self._display.debug("opening command with Popen()")
p = subprocess.Popen(
cmd,
shell=isinstance(cmd, basestring),

@@ -70,7 +68,7 @@ class Connection(ConnectionBase):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
debug("done running command with Popen()")
self._display.debug("done running command with Popen()")
if self._play_context.prompt and self._play_context.become_pass:
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)

@@ -95,11 +93,11 @@ class Connection(ConnectionBase):
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
debug("getting output with communicate()")
self._display.debug("getting output with communicate()")
stdout, stderr = p.communicate()
debug("done communicating")
self._display.debug("done communicating")
debug("done with local.exec_command()")
self._display.debug("done with local.exec_command()")
return (p.returncode, '', stdout, stderr)
def put_file(self, in_path, out_path):

@@ -43,7 +43,6 @@ from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
from ansible.utils.path import makedirs_safe
from ansible.utils.debug import debug
AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.

@@ -226,7 +225,7 @@ class Connection(ConnectionBase):
if self._play_context.prompt:
if self._play_context.become and self._play_context.become_pass:
while True:
debug('Waiting for Privilege Escalation input')
self._display.debug('Waiting for Privilege Escalation input')
if self.check_become_success(become_output) or self.check_password_prompt(become_output):
break
chunk = chan.recv(bufsize)

@@ -37,7 +37,6 @@ from hashlib import sha1
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
from ansible.utils.debug import debug
class Connection(ConnectionBase):
''' ssh based connections '''

@@ -367,7 +366,7 @@ class Connection(ConnectionBase):
* detect prompt on stderr (no-tty)
'''
debug("Handling privilege escalation password prompt.")
self._display.debug("Handling privilege escalation password prompt.")
if self._play_context.become and self._play_context.become_pass:

@@ -377,7 +376,7 @@ class Connection(ConnectionBase):
become_output = ''
become_errput = ''
while True:
debug('Waiting for Privilege Escalation input')
self._display.debug('Waiting for Privilege Escalation input')
if self.check_become_success(become_output) or self.check_password_prompt(become_output):
break

@@ -400,7 +399,7 @@ class Connection(ConnectionBase):
raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output)
if not self.check_become_success(become_output):
debug("Sending privilege escalation password.")
self._display.debug("Sending privilege escalation password.")
stdin.write(self._play_context.become_pass + '\n')
else:
no_prompt_out = become_output

@@ -94,7 +94,7 @@ class Connection(ConnectionBase):
endpoint = parse.urlunsplit((scheme, netloc, '/wsman', '', ''))
self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._play_context.remote_addr)
self._display.debug('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._play_context.remote_addr)
protocol = Protocol(
endpoint,
transport=transport,

@@ -117,16 +117,16 @@ class Connection(ConnectionBase):
raise AnsibleError("the username/password specified for this server was incorrect")
elif code == 411:
return protocol
self._display.vvvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self._play_context.remote_addr)
self._display.debug('WINRM CONNECTION ERROR: %s' % err_msg, host=self._play_context.remote_addr)
continue
if exc:
raise AnsibleError(str(exc))
def _winrm_exec(self, command, args=(), from_exec=False):
if from_exec:
self._display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
self._display.debug("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
else:
self._display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
self._display.debugv("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
if not self.protocol:
self.protocol = self._winrm_connect()
if not self.shell_id:

@@ -136,11 +136,11 @@ class Connection(ConnectionBase):
command_id = self.protocol.run_command(self.shell_id, command, args)
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
self._display.vvvvv('WINRM RESULT %r' % response, host=self._play_context.remote_addr)
self._display.debug('WINRM RESULT %r' % response, host=self._play_context.remote_addr)
else:
self._display.vvvvvv('WINRM RESULT %r' % response, host=self._play_context.remote_addr)
self._display.vvvvvv('WINRM STDOUT %s' % response.std_out, host=self._play_context.remote_addr)
self._display.vvvvvv('WINRM STDERR %s' % response.std_err, host=self._play_context.remote_addr)
self._display.debugv('WINRM RESULT %r' % response, host=self._play_context.remote_addr)
self._display.debugv('WINRM STDOUT %s' % response.std_out, host=self._play_context.remote_addr)
self._display.debugv('WINRM STDERR %s' % response.std_err, host=self._play_context.remote_addr)
return response
finally:
if command_id:

@@ -206,7 +206,7 @@ class Connection(ConnectionBase):
out_path = out_path + '.ps1'
b64_data = base64.b64encode(out_data)
script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size)
self._display.vvvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self._play_context.remote_addr)
self._display.debug("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self._play_context.remote_addr)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:

@@ -248,7 +248,7 @@ class Connection(ConnectionBase):
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
self._display.vvvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self._play_context.remote_addr)
self._display.debug("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self._play_context.remote_addr)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:

@@ -19,11 +19,18 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
__all__ = ['LookupBase']
class LookupBase:
def __init__(self, loader=None, **kwargs):
self._loader = loader
self._display = display
def _flatten(self, terms):
ret = []
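LookupBase now carries the shared object on self._display, which is what the file, template and url lookups below rely on for their debug and vvvv messages. A hypothetical lookup plugin written against the same base class (the import path and run() signature here are illustrative assumptions, not taken from this diff):

    from ansible.plugins.lookup import LookupBase

    class LookupModule(LookupBase):

        def run(self, terms, variables=None, **kwargs):
            ret = []
            for term in terms:
                # self._display is set by LookupBase.__init__ in the hunk above
                self._display.debug("example lookup term: %s" % term)
                ret.append(term)
            return ret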

@@ -38,6 +38,7 @@ class LookupModule(LookupBase):
basedir = self._loader.get_basedir()
for term in terms:
self._display.debug("File lookup term: %s" % term)
# Special handling of the file lookup, used primarily when the
# lookup is done from a role. If the file isn't found in the

@@ -46,6 +47,7 @@ class LookupModule(LookupBase):
# itself (which will be relative to the current working dir)
lookupfile = self._loader.path_dwim_relative(basedir, 'files', term)
self._display.vvvv("File lookup using %s as file" % lookupfile)
try:
if lookupfile:
contents, show_data = self._loader._get_file_contents(lookupfile)

@@ -40,8 +40,10 @@ class LookupModule(LookupBase):
basedir = self._loader.get_basedir()
for term in terms:
self._display.debug("File lookup term: %s" % term)
lookupfile = self._loader.path_dwim_relative(basedir, 'templates', term)
self._display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile and os.path.exists(lookupfile):
with open(lookupfile, 'r') as f:
template_data = f.read()

@@ -36,6 +36,7 @@ class LookupModule(LookupBase):
ret = []
for term in terms:
self._display.vvvv("url lookup connecting to %s" % term)
try:
response = open_url(term, validate_certs=validate_certs)
except urllib2.URLError as e:

@@ -31,8 +31,12 @@ from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role import hash_params
from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader
from ansible.template import Templar
from ansible.utils.debug import debug
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
__all__ = ['StrategyBase']

@@ -65,6 +69,7 @@ class StrategyBase:
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._display = display
# internal counters
self._pending_results = 0

@@ -80,7 +85,7 @@ class StrategyBase:
failed_hosts = self._tqm._failed_hosts.keys()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
debug("running handlers")
self._display.debug("running handlers")
result &= self.run_handlers(iterator, play_context)
# now update with the hosts (if any) that failed or were

@@ -120,12 +125,12 @@ class StrategyBase:
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
debug("entering _queue_task() for %s/%s" % (host, task))
self._display.debug("entering _queue_task() for %s/%s" % (host, task))
# and then queue the new task
debug("%s - putting task (%s) in queue" % (host, task))
self._display.debug("%s - putting task (%s) in queue" % (host, task))
try:
debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
self._display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
(worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
self._cur_worker += 1

@@ -140,9 +145,9 @@ class StrategyBase:
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
debug("got an error while queuing: %s" % e)
self._display.debug("got an error while queuing: %s" % e)
return
debug("exiting _queue_task() for %s/%s" % (host, task))
self._display.debug("exiting _queue_task() for %s/%s" % (host, task))
def _process_pending_results(self, iterator):
'''

@@ -155,7 +160,7 @@ class StrategyBase:
while not self._final_q.empty() and not self._tqm._terminated:
try:
result = self._final_q.get(block=False)
debug("got result from result worker: %s" % ([unicode(x) for x in result],))
self._display.debug("got result from result worker: %s" % ([unicode(x) for x in result],))
# all host status messages contain 2 entries: (msg, task_result)
if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):

@@ -164,7 +169,7 @@ class StrategyBase:
task = task_result._task
if result[0] == 'host_task_failed' or task_result.is_failed():
if not task.ignore_errors:
debug("marking %s as failed" % host.name)
self._display.debug("marking %s as failed" % host.name)
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)

@@ -275,12 +280,12 @@ class StrategyBase:
ret_results = []
debug("waiting for pending results...")
self._display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
results = self._process_pending_results(iterator)
ret_results.extend(results)
time.sleep(0.01)
debug("no more pending results, returning what we have")
self._display.debug("no more pending results, returning what we have")
return ret_results

@@ -436,5 +441,5 @@ class StrategyBase:
self._wait_on_pending_results(iterator)
# wipe the notification list
self._notified_handlers[handler_name] = []
debug("done running handlers, result is: %s" % result)
self._display.debug("done running handlers, result is: %s" % result)
return result

@@ -22,7 +22,12 @@ __metaclass__ = type
import time
from ansible.plugins.strategies import StrategyBase
from ansible.utils.debug import debug
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class StrategyModule(StrategyBase):

@@ -62,21 +67,21 @@ class StrategyModule(StrategyBase):
host_results = []
while True:
host = hosts_left[last_host]
debug("next free host: %s" % host)
self._display.debug("next free host: %s" % host)
host_name = host.get_name()
# peek at the next task for the host, to see if there's
# anything to do do for this host
(state, task) = iterator.get_next_task_for_host(host, peek=True)
debug("free host state: %s" % state)
debug("free host task: %s" % task)
self._display.debug("free host state: %s" % state)
self._display.debug("free host task: %s" % task)
if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
# set the flag so the outer loop knows we've still found
# some work which needs to be done
work_to_do = True
debug("this host has work to do")
self._display.debug("this host has work to do")
# check to see if this host is blocked (still executing a previous task)
if not host_name in self._blocked_hosts:

@@ -84,9 +89,9 @@ class StrategyModule(StrategyBase):
self._blocked_hosts[host_name] = True
(state, task) = iterator.get_next_task_for_host(host)
debug("getting variables")
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
debug("done getting variables")
self._display.debug("done getting variables")
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)

@@ -94,11 +99,11 @@ class StrategyModule(StrategyBase):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
debug("'%s' skipped because role has already run" % task)
self._display.debug("'%s' skipped because role has already run" % task)
continue
if not task.evaluate_tags(play_context.only_tags, play_context.skip_tags, task_vars) and task.action != 'setup':
debug("'%s' failed tag evaluation" % task)
self._display.debug("'%s' failed tag evaluation" % task)
continue
if task.action == 'meta':

@@ -27,7 +27,6 @@ from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
from ansible.utils.debug import debug
class StrategyModule(StrategyBase):

@@ -132,9 +131,9 @@ class StrategyModule(StrategyBase):
while work_to_do and not self._tqm._terminated:
try:
debug("getting the remaining hosts for this loop")
self._display.debug("getting the remaining hosts for this loop")
hosts_left = self._inventory.get_hosts(iterator._play.hosts)
debug("done getting the remaining hosts for this loop")
self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False

@@ -169,7 +168,7 @@ class StrategyModule(StrategyBase):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
debug("'%s' skipped because role has already run" % task)
self._display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':

@@ -184,11 +183,11 @@ class StrategyModule(StrategyBase):
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
else:
debug("getting variables")
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
debug("done getting variables")
self._display.debug("done getting variables")
if not callback_sent:
temp_task = task.copy()

@@ -206,12 +205,12 @@ class StrategyModule(StrategyBase):
if run_once:
break
debug("done queuing things up, now waiting for results queue to drain")
self._display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
debug("out of hosts to run on")
self._display.debug("out of hosts to run on")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break

@@ -236,8 +235,7 @@ class StrategyModule(StrategyBase):
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
# FIXME: callback here?
print(e)
self._display.warning(str(e))
continue
for new_block in new_blocks:

@@ -256,9 +254,9 @@ class StrategyModule(StrategyBase):
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
debug("results queue empty")
self._display.debug("results queue empty")
except (IOError, EOFError), e:
debug("got IOError/EOFError in task loop: %s" % e)
self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False

@@ -24,6 +24,8 @@ import os
import random
import subprocess
import sys
import time
from multiprocessing import Lock
from ansible import constants as C
from ansible.errors import AnsibleError

@@ -44,6 +46,7 @@ class Display:
self.cowsay = None
self.noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
self.set_cowsay_info()
#self.debug_lock = Lock()
def set_cowsay_info(self):

@@ -102,6 +105,14 @@ class Display:
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg):
    if C.DEFAULT_DEBUG:
        # FIXME: enable when display is inherited to all
        #self.debug_lock.acquire()
        self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray')
        sys.stdout.flush()
        #self.debug_lock.release()
def verbose(self, msg, host=None, caplevel=2):
# FIXME: this needs to be implemented
#msg = utils.sanitize_output(msg)
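The debug() method added to Display above only emits output when C.DEFAULT_DEBUG is set (normally driven by Ansible's debug setting, e.g. the ANSIBLE_DEBUG environment variable), prefixing each message with the process id and a timestamp. A small sketch of calling it directly, assuming debugging has been switched on:

    from ansible.utils.display import Display

    d = Display()
    # with debugging enabled this prints something like
    #  12345 1436213987.00123: loading plugins
    # in dark gray; with it disabled the call produces no output
    d.debug("loading plugins")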