mirror of https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00

Creating playbook executor and dependent classes

parent b6c3670f8a
commit 62d79568be

158 changed files with 22486 additions and 2353 deletions

@@ -18,3 +18,5 @@
 # Make coding more python3-ish
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
+
+__version__ = '1.v2'

@@ -104,6 +104,7 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
 DEFAULTS='defaults'

 # configurable things
+DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
 DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts'))
 DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
 DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))

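For readers unfamiliar with get_config, its lookup order is: environment variable first, then the ini section, then the hard-coded default (with optional boolean coercion). The following is a minimal illustrative sketch of that precedence, not the function's actual source; p is assumed to be a ConfigParser-style object.

import os

def get_config_sketch(p, section, key, env_var, default, boolean=False):
    # the environment variable wins over the ini file, which wins over the default
    value = os.environ.get(env_var)
    if value is None:
        try:
            value = p.get(section, key)
        except Exception:
            value = default
    if boolean and isinstance(value, str):
        value = value.lower() in ('1', 'true', 'yes', 'on')
    return value
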
@@ -21,7 +21,7 @@ __metaclass__ = type

 import os

-from ansible.parsing.yaml.strings import *
+from ansible.errors.yaml_strings import *

 class AnsibleError(Exception):
     '''

@@ -45,12 +45,12 @@ class AnsibleError(Exception):

         self._obj = obj
         self._show_content = show_content
-        if isinstance(self._obj, AnsibleBaseYAMLObject):
+        if obj and isinstance(obj, AnsibleBaseYAMLObject):
             extended_error = self._get_extended_error()
             if extended_error:
-                self.message = '%s\n\n%s' % (message, extended_error)
+                self.message = 'ERROR! %s\n\n%s' % (message, extended_error)
             else:
-                self.message = message
+                self.message = 'ERROR! %s' % message

     def __str__(self):
         return self.message

@@ -98,8 +98,9 @@ class AnsibleError(Exception):
             (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
             if target_line:
                 stripped_line = target_line.replace(" ","")
-                arrow_line = (" " * (col_number-1)) + "^"
-                error_message += "%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
+                arrow_line = (" " * (col_number-1)) + "^ here"
+                #header_line = ("=" * 73)
+                error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)

             # common error/remediation checking here:
             # check for unquoted vars starting lines

@@ -158,3 +159,11 @@ class AnsibleModuleError(AnsibleRuntimeError):
 class AnsibleConnectionFailure(AnsibleRuntimeError):
     ''' the transport / connection_plugin had a fatal error '''
     pass
+
+class AnsibleFilterError(AnsibleRuntimeError):
+    ''' a templating failure '''
+    pass
+
+class AnsibleUndefinedVariable(AnsibleRuntimeError):
+    ''' a templating failure '''
+    pass

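Taken together, these error changes prepend "ERROR!" to every message and expand the file excerpt with an explanatory lead-in and a "^ here" arrow. The rendered output then looks roughly like this (the playbook content shown is hypothetical):

ERROR! Syntax Error while loading YAML

The offending line appears to be:

    - name: example task
      command echo "hello"
              ^ here
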
v2/ansible/executor/connection_info.py (new file, 167 lines)

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pipes
import random

from ansible import constants as C

__all__ = ['ConnectionInformation']


class ConnectionInformation:

    '''
    This class is used to consolidate the connection information for
    hosts in a play and child tasks, where the task may override some
    connection/authentication information.
    '''

    def __init__(self, play=None, options=None):
        # FIXME: implement the new methodology here for supporting
        #        various different auth escalation methods (becomes, etc.)

        self.connection = C.DEFAULT_TRANSPORT
        self.remote_user = 'root'
        self.password = ''
        self.port = 22
        self.su = False
        self.su_user = ''
        self.su_pass = ''
        self.sudo = False
        self.sudo_user = ''
        self.sudo_pass = ''
        self.verbosity = 0
        self.only_tags = set()
        self.skip_tags = set()

        if play:
            self.set_play(play)

        if options:
            self.set_options(options)

    def set_play(self, play):
        '''
        Configures this connection information instance with data from
        the play class.
        '''

        if play.connection:
            self.connection = play.connection

        self.remote_user = play.remote_user
        self.password = ''
        self.port = int(play.port) if play.port else 22
        self.su = play.su
        self.su_user = play.su_user
        self.su_pass = play.su_pass
        self.sudo = play.sudo
        self.sudo_user = play.sudo_user
        self.sudo_pass = play.sudo_pass

    def set_options(self, options):
        '''
        Configures this connection information instance with data from
        options specified by the user on the command line. These have a
        higher precedence than those set on the play or host.
        '''

        # FIXME: set other values from options here?

        self.verbosity = options.verbosity
        if options.connection:
            self.connection = options.connection

        # get the tag info from options, converting a comma-separated list
        # of values into a proper list if need be
        if isinstance(options.tags, list):
            self.only_tags.update(options.tags)
        elif isinstance(options.tags, basestring):
            self.only_tags.update(options.tags.split(','))
        if isinstance(options.skip_tags, list):
            self.skip_tags.update(options.skip_tags)
        elif isinstance(options.skip_tags, basestring):
            self.skip_tags.update(options.skip_tags.split(','))

    def copy(self, ci):
        '''
        Copies the connection info from another connection info object, used
        when merging in data from task overrides.
        '''

        self.connection = ci.connection
        self.remote_user = ci.remote_user
        self.password = ci.password
        self.port = ci.port
        self.su = ci.su
        self.su_user = ci.su_user
        self.su_pass = ci.su_pass
        self.sudo = ci.sudo
        self.sudo_user = ci.sudo_user
        self.sudo_pass = ci.sudo_pass
        self.verbosity = ci.verbosity
        self.only_tags = ci.only_tags.copy()
        self.skip_tags = ci.skip_tags.copy()

    def set_task_override(self, task):
        '''
        Sets attributes from the task if they are set, which will override
        those from the play.
        '''

        new_info = ConnectionInformation()
        new_info.copy(self)

        for attr in ('connection', 'remote_user', 'su', 'su_user', 'su_pass', 'sudo', 'sudo_user', 'sudo_pass'):
            if hasattr(task, attr):
                attr_val = getattr(task, attr)
                if attr_val:
                    setattr(new_info, attr, attr_val)

        return new_info

    def make_sudo_cmd(self, sudo_exe, executable, cmd):
        """
        Helper function for wrapping commands with sudo.

        Rather than detect if sudo wants a password this time, -k makes
        sudo always ask for a password if one is required. Passing a quoted
        compound command to sudo (or sudo -s) directly doesn't work, so we
        shellquote it with pipes.quote() and pass the quoted string to the
        user's shell. We loop reading output until we see the randomly-
        generated sudo prompt set with the -p option.
        """

        randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
        prompt = '[sudo via ansible, key=%s] password: ' % randbits
        success_key = 'SUDO-SUCCESS-%s' % randbits

        sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
            sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt,
            self.sudo_user, executable or '$SHELL',
            pipes.quote('echo %s; %s' % (success_key, cmd))
        )

        #return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
        return (sudocmd, prompt, success_key)

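The precedence rule in set_task_override — a task attribute wins only when it is present and truthy — can be seen with a small sketch. The play/task stand-ins below are hypothetical objects carrying just the attributes the class reads; only ConnectionInformation itself comes from this commit, and the snippet assumes it runs in a tree where ansible.constants imports.

class StubPlay:
    connection = 'ssh'
    remote_user = 'deploy'
    port = 2222
    su = False
    su_user = ''
    su_pass = ''
    sudo = True
    sudo_user = 'root'
    sudo_pass = ''

class StubTask:
    remote_user = 'admin'   # truthy: overrides the play's value
    sudo_user = ''          # falsy: the play's value is kept

conn_info = ConnectionInformation(play=StubPlay())
new_info = conn_info.set_task_override(StubTask())
# new_info.remote_user == 'admin' (task override applied)
# new_info.sudo_user == 'root'    (falsy task value ignored)
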
v2/ansible/executor/manager.py (new file, 66 lines)

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from multiprocessing.managers import SyncManager, BaseProxy
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.play import Play
from ansible.errors import AnsibleError

__all__ = ['AnsibleManager']


class VariableManagerWrapper:
    '''
    This class simply acts as a wrapper around the VariableManager class,
    since manager proxies expect a new object to be returned rather than
    any existing one. Using this wrapper, a shared proxy can be created
    and an existing VariableManager class assigned to it, which can then
    be accessed through the exposed proxy methods.
    '''

    def __init__(self):
        self._vm = None

    def get_vars(self, loader, play=None, host=None, task=None):
        return self._vm.get_vars(loader=loader, play=play, host=host, task=task)

    def set_variable_manager(self, vm):
        self._vm = vm

    def set_host_variable(self, host, varname, value):
        self._vm.set_host_variable(host, varname, value)

    def set_host_facts(self, host, facts):
        self._vm.set_host_facts(host, facts)

class AnsibleManager(SyncManager):
    '''
    This is our custom manager class, which exists only so we may register
    the new proxy below
    '''
    pass

AnsibleManager.register(
    typeid='VariableManagerWrapper',
    callable=VariableManagerWrapper,
)

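A hedged sketch of how the registered proxy type would be obtained and seeded; this follows the standard multiprocessing SyncManager pattern, and variable_manager is assumed to be a VariableManager instance created elsewhere in the tree.

manager = AnsibleManager()
manager.start()
vm_proxy = manager.VariableManagerWrapper()   # returns a shared proxy object
vm_proxy.set_variable_manager(variable_manager)
# forked worker processes handed vm_proxy can now call
# vm_proxy.get_vars(loader, play=..., host=..., task=...) safely
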
v2/ansible/executor/module_common.py (new file, 185 lines)

# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# from python and deps
from cStringIO import StringIO
import inspect
import json
import os
import shlex

# from Ansible
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify

REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""

class ModuleReplacer(object):

    """
    The Replacer is used to insert chunks of code into modules before
    transfer. Rather than doing classical python imports, this allows for more
    efficient transfer in a no-bootstrapping scenario by not moving extra files
    over the wire, and also takes care of embedding arguments in the transferred
    modules.

    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

    ... will result in the insertion of basic.py into the module
    from the module_utils/ directory in the source tree.

    All modules are required to import at least basic, though there will also
    be other snippets.

    # POWERSHELL_COMMON

    Also results in the inclusion of the common code in powershell.ps1
    """

    # ******************************************************************************

    def __init__(self, strip_comments=False):
        # FIXME: these members need to be prefixed with '_' and the rest of the file fixed
        this_file = inspect.getfile(inspect.currentframe())
        # we've moved the module_common relative to the snippets, so fix the path
        self.snippet_path = os.path.join(os.path.dirname(this_file), '..', 'module_utils')
        self.strip_comments = strip_comments

    # ******************************************************************************

    def slurp(self, path):
        if not os.path.exists(path):
            raise AnsibleError("imported module support code does not exist at %s" % path)
        fd = open(path)
        data = fd.read()
        fd.close()
        return data

    def _find_snippet_imports(self, module_data, module_path):
        """
        Given the source of the module, convert it to a Jinja2 template to insert
        module code and return whether it's a new or old style module.
        """

        module_style = 'old'
        if REPLACER in module_data:
            module_style = 'new'
        elif 'from ansible.module_utils.' in module_data:
            module_style = 'new'
        elif 'WANT_JSON' in module_data:
            module_style = 'non_native_want_json'

        output = StringIO()
        lines = module_data.split('\n')
        snippet_names = []

        for line in lines:

            if REPLACER in line:
                output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
                snippet_names.append('basic')
            if REPLACER_WINDOWS in line:
                ps_data = self.slurp(os.path.join(self.snippet_path, "powershell.ps1"))
                output.write(ps_data)
                snippet_names.append('powershell')
            elif line.startswith('from ansible.module_utils.'):
                tokens=line.split(".")
                import_error = False
                if len(tokens) != 3:
                    import_error = True
                if " import *" not in line:
                    import_error = True
                if import_error:
                    raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
                snippet_name = tokens[2].split()[0]
                snippet_names.append(snippet_name)
                output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
            else:
                if self.strip_comments and line.startswith("#") or line == '':
                    pass
                output.write(line)
                output.write("\n")

        if not module_path.endswith(".ps1"):
            # Unixy modules
            if len(snippet_names) > 0 and not 'basic' in snippet_names:
                raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
        else:
            # Windows modules
            if len(snippet_names) > 0 and not 'powershell' in snippet_names:
                raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)

        return (output.getvalue(), module_style)

    # ******************************************************************************

    def modify_module(self, module_path, module_args):

        with open(module_path) as f:

            # read in the module source
            module_data = f.read()

            (module_data, module_style) = self._find_snippet_imports(module_data, module_path)

            #module_args_json = jsonify(module_args)
            module_args_json = json.dumps(module_args)
            encoded_args = repr(module_args_json.encode('utf-8'))

            # these strings should be part of the 'basic' snippet which is required to be included
            module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
            module_data = module_data.replace(REPLACER_ARGS, "''")
            module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)

            # FIXME: we're not passing around an inject dictionary anymore, so
            #        this needs to be fixed with whatever method we use for vars
            #        like this moving forward
            #if module_style == 'new':
            #    facility = C.DEFAULT_SYSLOG_FACILITY
            #    if 'ansible_syslog_facility' in inject:
            #        facility = inject['ansible_syslog_facility']
            #    module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)

            lines = module_data.split("\n")
            shebang = None
            if lines[0].startswith("#!"):
                shebang = lines[0].strip()
                args = shlex.split(str(shebang[2:]))
                interpreter = args[0]
                interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)

                # FIXME: more inject stuff here...
                #if interpreter_config in inject:
                #    lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
                #    module_data = "\n".join(lines)

            return (module_data, module_style, shebang)

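An illustrative call into the replacer, to make the return contract concrete; the module path is hypothetical, and this assumes a source tree where the module_utils snippets exist next to this file as described above.

replacer = ModuleReplacer(strip_comments=False)
(module_data, module_style, shebang) = replacer.modify_module(
    '/usr/share/ansible/ping',   # hypothetical path to a module source file
    dict(data='pong'),           # module args, JSON-encoded and embedded in place of REPLACER_COMPLEX
)
# module_style would be 'new' for a module importing ansible.module_utils.basic;
# module_data is the self-contained source with basic.py inlined, ready to transfer.
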
v2/ansible/executor/play_iterator.py (new file, 258 lines)

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import *
from ansible.playbook.task import Task

from ansible.utils.boolean import boolean

__all__ = ['PlayIterator']


# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4

# the failure states for the play iteration
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 3
FAILED_ALWAYS = 4

class PlayState:

    '''
    A helper class, which keeps track of the task iteration
    state for a given playbook. This is used in the PlaybookIterator
    class on a per-host basis.
    '''

    # FIXME: this class is the representation of a finite state machine,
    #        so we really should have a well defined state representation
    #        documented somewhere...

    def __init__(self, parent_iterator, host):
        '''
        Create the initial state, which tracks the running state as well
        as the failure state, which are used when executing block branches
        (rescue/always)
        '''

        self._run_state = ITERATING_SETUP
        self._failed_state = FAILED_NONE
        self._task_list = parent_iterator._play.compile()
        self._gather_facts = parent_iterator._play.gather_facts
        self._host = host

        self._cur_block = None
        self._cur_role = None
        self._cur_task_pos = 0
        self._cur_rescue_pos = 0
        self._cur_always_pos = 0
        self._cur_handler_pos = 0

    def next(self, peek=False):
        '''
        Determines and returns the next available task from the playbook,
        advancing through the list of plays as it goes. If peek is set to True,
        the internal state is not stored.
        '''

        task = None

        # save this locally so that we can peek at the next task
        # without updating the internal state of the iterator
        run_state = self._run_state
        failed_state = self._failed_state
        cur_block = self._cur_block
        cur_role = self._cur_role
        cur_task_pos = self._cur_task_pos
        cur_rescue_pos = self._cur_rescue_pos
        cur_always_pos = self._cur_always_pos
        cur_handler_pos = self._cur_handler_pos

        while True:
            if run_state == ITERATING_SETUP:
                if failed_state == FAILED_SETUP:
                    run_state = ITERATING_COMPLETE
                else:
                    run_state = ITERATING_TASKS

                    if self._gather_facts == 'smart' and not self._host.gathered_facts or boolean(self._gather_facts):
                        self._host.set_gathered_facts(True)
                        task = Task()
                        task.action = 'setup'
                        break
            elif run_state == ITERATING_TASKS:
                # if there is any failure state besides FAILED_NONE, we should
                # change to some other running state
                if failed_state != FAILED_NONE or cur_task_pos > len(self._task_list) - 1:
                    # if there is a block (and there always should be), start running
                    # the rescue portion if it exists (and if we haven't failed that
                    # already), or the always portion (if it exists and we didn't fail
                    # there too). Otherwise, we're done iterating.
                    if cur_block:
                        if failed_state != FAILED_RESCUE and cur_block.rescue:
                            run_state = ITERATING_RESCUE
                            cur_rescue_pos = 0
                        elif failed_state != FAILED_ALWAYS and cur_block.always:
                            run_state = ITERATING_ALWAYS
                            cur_always_pos = 0
                        else:
                            run_state = ITERATING_COMPLETE
                    else:
                        run_state = ITERATING_COMPLETE
                else:
                    task = self._task_list[cur_task_pos]
                    if cur_block is not None and cur_block != task._block:
                        run_state = ITERATING_ALWAYS
                        continue
                    else:
                        cur_block = task._block
                    cur_task_pos += 1

                    # Break out of the while loop now that we have our task
                    break

            elif run_state == ITERATING_RESCUE:
                # If we're iterating through the rescue tasks, make sure we haven't
                # failed yet. If so, move on to the always block or if not get the
                # next rescue task (if one exists)
                if failed_state == FAILED_RESCUE or cur_block.rescue is None or cur_rescue_pos > len(cur_block.rescue) - 1:
                    run_state = ITERATING_ALWAYS
                else:
                    task = cur_block.rescue[cur_rescue_pos]
                    cur_rescue_pos += 1
                    break

            elif run_state == ITERATING_ALWAYS:
                # If we're iterating through the always tasks, make sure we haven't
                # failed yet. If so, we're done iterating otherwise get the next always
                # task (if one exists)
                if failed_state == FAILED_ALWAYS or cur_block.always is None or cur_always_pos > len(cur_block.always) - 1:
                    cur_block = None
                    if failed_state == FAILED_ALWAYS or cur_task_pos > len(self._task_list) - 1:
                        run_state = ITERATING_COMPLETE
                    else:
                        run_state = ITERATING_TASKS
                else:
                    task = cur_block.always[cur_always_pos]
                    cur_always_pos += 1
                    break

            elif run_state == ITERATING_COMPLETE:
                # done iterating, return None to signify that
                return None

        if task._role:
            if cur_role and task._role != cur_role:
                cur_role._completed = True
            cur_role = task._role

        # If we're not just peeking at the next task, save the internal state
        if not peek:
            self._run_state = run_state
            self._failed_state = failed_state
            self._cur_block = cur_block
            self._cur_role = cur_role
            self._cur_task_pos = cur_task_pos
            self._cur_rescue_pos = cur_rescue_pos
            self._cur_always_pos = cur_always_pos
            self._cur_handler_pos = cur_handler_pos

        return task

    def mark_failed(self):
        '''
        Escalates the failed state relative to the running state.
        '''
        if self._run_state == ITERATING_SETUP:
            self._failed_state = FAILED_SETUP
        elif self._run_state == ITERATING_TASKS:
            self._failed_state = FAILED_TASKS
        elif self._run_state == ITERATING_RESCUE:
            self._failed_state = FAILED_RESCUE
        elif self._run_state == ITERATING_ALWAYS:
            self._failed_state = FAILED_ALWAYS


class PlayIterator:

    '''
    The main iterator class, which keeps the state of the playbook
    on a per-host basis using the above PlaybookState class.
    '''

    def __init__(self, inventory, play):
        self._play = play
        self._inventory = inventory
        self._host_entries = dict()
        self._first_host = None

        # Build the per-host dictionary of playbook states, using a copy
        # of the play object so we can post_validate it to ensure any templated
        # fields are filled in without modifying the original object, since
        # post_validate() saves the templated values.

        # FIXME: this is a hacky way of doing this, the iterator should
        #        instead get the loader and variable manager directly
        #        as args to __init__
        all_vars = inventory._variable_manager.get_vars(loader=inventory._loader, play=play)
        new_play = play.copy()
        new_play.post_validate(all_vars, ignore_undefined=True)

        for host in inventory.get_hosts(new_play.hosts):
            if self._first_host is None:
                self._first_host = host
            self._host_entries[host.get_name()] = PlayState(parent_iterator=self, host=host)

    # FIXME: remove, probably not required anymore
    #def get_next_task(self, peek=False):
    #    ''' returns the next task for host[0] '''
    #
    #    first_entry = self._host_entries[self._first_host.get_name()]
    #    if not peek:
    #        for entry in self._host_entries:
    #            if entry != self._first_host.get_name():
    #                target_entry = self._host_entries[entry]
    #                if target_entry._cur_task_pos == first_entry._cur_task_pos:
    #                    target_entry.next()
    #    return first_entry.next(peek=peek)

    def get_next_task_for_host(self, host, peek=False):
        ''' fetch the next task for the given host '''
        if host.get_name() not in self._host_entries:
            raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)

        return self._host_entries[host.get_name()].next(peek=peek)

    def mark_host_failed(self, host):
        ''' mark the given host as failed '''
        if host.get_name() not in self._host_entries:
            raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)

        self._host_entries[host.get_name()].mark_failed()

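A hedged sketch of driving this iterator for a single host until the ITERATING_COMPLETE state is reached; inventory and play are assumed to be fully initialized objects supplied by the surrounding executor code.

iterator = PlayIterator(inventory, play)
host = inventory.get_hosts(play.hosts)[0]
while True:
    task = iterator.get_next_task_for_host(host)
    if task is None:
        break                       # ITERATING_COMPLETE was reached
    print(task.action)              # a synthetic 'setup' task comes first when gather_facts applies
# iterator.mark_host_failed(host) at any point would escalate the failure
# state and route the host into the block's rescue/always branches.
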
@@ -19,17 +19,110 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

+import signal
+
+from ansible import constants as C
+from ansible.errors import *
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.playbook import Playbook
+
+from ansible.utils.debug import debug
+
 class PlaybookExecutor:

-    def __init__(self, list_of_plays=[]):
-        # self.tqm = TaskQueueManager(forks)
-        assert False
+    '''
+    This is the primary class for executing playbooks, and thus the
+    basis for bin/ansible-playbook operation.
+    '''
+
+    def __init__(self, playbooks, inventory, variable_manager, loader, options):
+        self._playbooks = playbooks
+        self._inventory = inventory
+        self._variable_manager = variable_manager
+        self._loader = loader
+        self._options = options
+
+        self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options)

     def run(self):
-        # for play in list_of_plays:
-        #    for block in play.blocks:
-        #       # block must know its playbook class and context
-        #       tqm.enqueue(block)
-        # tqm.go()...
-        assert False
+        '''
+        Run the given playbook, based on the settings in the play which
+        may limit the runs to serialized groups, etc.
+        '''
+
+        signal.signal(signal.SIGINT, self._cleanup)
+
+        try:
+            for playbook_path in self._playbooks:
+                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
+
+                # FIXME: playbook entries are just plays, so we should rename them
+                for play in pb.get_entries():
+                    self._inventory.remove_restriction()
+
+                    # Create a temporary copy of the play here, so we can run post_validate
+                    # on it without the templating changes affecting the original object.
+                    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
+                    new_play = play.copy()
+                    new_play.post_validate(all_vars, ignore_undefined=True)
+
+                    result = True
+                    for batch in self._get_serialized_batches(new_play):
+                        if len(batch) == 0:
+                            raise AnsibleError("No hosts matched the list specified in the play", obj=play._ds)
+                        # restrict the inventory to the hosts in the serialized batch
+                        self._inventory.restrict_to_hosts(batch)
+                        # and run it...
+                        result = self._tqm.run(play=play)
+                        if not result:
+                            break
+
+                    if not result:
+                        # FIXME: do something here, to signify the playbook execution failed
+                        self._cleanup()
+                        return 1
+        except:
+            self._cleanup()
+            raise
+
+        self._cleanup()
+        return 0
+
+    def _cleanup(self, signum=None, framenum=None):
+        self._tqm.cleanup()
+
+    def _get_serialized_batches(self, play):
+        '''
+        Returns a list of hosts, subdivided into batches based on
+        the serial size specified in the play.
+        '''
+
+        # make sure we have a unique list of hosts
+        all_hosts = self._inventory.get_hosts(play.hosts)
+
+        # check to see if the serial number was specified as a percentage,
+        # and convert it to an integer value based on the number of hosts
+        if isinstance(play.serial, basestring) and play.serial.endswith('%'):
+            serial_pct = int(play.serial.replace("%",""))
+            serial = int((serial_pct/100.0) * len(all_hosts))
+        else:
+            serial = int(play.serial)
+
+        # if the serial count was not specified or is invalid, default to
+        # a list of all hosts, otherwise split the list of hosts into chunks
+        # which are based on the serial size
+        if serial <= 0:
+            return [all_hosts]
+        else:
+            serialized_batches = []
+
+            while len(all_hosts) > 0:
+                play_hosts = []
+                for x in range(serial):
+                    if len(all_hosts) > 0:
+                        play_hosts.append(all_hosts.pop(0))
+
+                serialized_batches.append(play_hosts)
+
+            return serialized_batches

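A worked example of the batching rule in _get_serialized_batches: with serial set to '30%' and ten hosts, serial_pct is 30, so serial = int(0.30 * 10) = 3, yielding batches of 3, 3, 3, and 1 host. The same grouping can be written compactly as a stand-alone sketch (the host names are hypothetical):

hosts = ['h%d' % i for i in range(1, 11)]
serial = int((30 / 100.0) * len(hosts))                              # -> 3
batches = [hosts[i:i + serial] for i in range(0, len(hosts), serial)]
# [['h1','h2','h3'], ['h4','h5','h6'], ['h7','h8','h9'], ['h10']]
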
@@ -1,125 +0,0 @@ (file deleted: the superseded PlaybookIterator implementation, 125 lines)

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

class PlaybookState:

    '''
    A helper class, which keeps track of the task iteration
    state for a given playbook. This is used in the PlaybookIterator
    class on a per-host basis.
    '''
    def __init__(self, parent_iterator):
        self._parent_iterator = parent_iterator
        self._cur_play = 0
        self._task_list = None
        self._cur_task_pos = 0
        self._done = False

    def next(self, peek=False):
        '''
        Determines and returns the next available task from the playbook,
        advancing through the list of plays as it goes.
        '''

        task = None

        # we save these locally so that we can peek at the next task
        # without updating the internal state of the iterator
        cur_play = self._cur_play
        task_list = self._task_list
        cur_task_pos = self._cur_task_pos

        while True:
            # when we hit the end of the playbook entries list, we set a flag
            # and return None to indicate we're there
            # FIXME: accessing the entries and parent iterator playbook members
            #        should be done through accessor functions
            if self._done or cur_play > len(self._parent_iterator._playbook._entries) - 1:
                self._done = True
                return None

            # initialize the task list by calling the .compile() method
            # on the play, which will call compile() for all child objects
            if task_list is None:
                task_list = self._parent_iterator._playbook._entries[cur_play].compile()

            # if we've hit the end of this play's task list, move on to the next
            # and reset the position values for the next iteration
            if cur_task_pos > len(task_list) - 1:
                cur_play += 1
                task_list = None
                cur_task_pos = 0
                continue
            else:
                # FIXME: do tag/conditional evaluation here and advance
                #        the task position if it should be skipped without
                #        returning a task
                task = task_list[cur_task_pos]
                cur_task_pos += 1

                # Skip the task if it is the member of a role which has already
                # been run, unless the role allows multiple executions
                if task._role:
                    # FIXME: this should all be done via member functions
                    #        instead of direct access to internal variables
                    if task._role.has_run() and not task._role._metadata._allow_duplicates:
                        continue

                # Break out of the while loop now that we have our task
                break

        # If we're not just peeking at the next task, save the internal state
        if not peek:
            self._cur_play = cur_play
            self._task_list = task_list
            self._cur_task_pos = cur_task_pos

        return task

class PlaybookIterator:

    '''
    The main iterator class, which keeps the state of the playbook
    on a per-host basis using the above PlaybookState class.
    '''

    def __init__(self, inventory, log_manager, playbook):
        self._playbook = playbook
        self._log_manager = log_manager
        self._host_entries = dict()
        self._first_host = None

        # build the per-host dictionary of playbook states
        for host in inventory.get_hosts():
            if self._first_host is None:
                self._first_host = host
            self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self)

    def get_next_task(self, peek=False):
        ''' returns the next task for host[0] '''
        return self._host_entries[self._first_host.get_name()].next(peek=peek)

    def get_next_task_for_host(self, host, peek=False):
        ''' fetch the next task for the given host '''
        if host.get_name() not in self._host_entries:
            raise AnsibleError("invalid host specified for playbook iteration")

        return self._host_entries[host.get_name()].next(peek=peek)

v2/ansible/executor/process/result.py (new file, 155 lines)

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import Queue
import multiprocessing
import os
import signal
import sys
import time
import traceback

HAS_ATFORK=True
try:
    from Crypto.Random import atfork
except ImportError:
    HAS_ATFORK=False

from ansible.executor.task_result import TaskResult
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task

from ansible.utils.debug import debug

__all__ = ['ResultProcess']


class ResultProcess(multiprocessing.Process):
    '''
    The result worker thread, which reads results from the results
    queue and fires off callbacks/etc. as necessary.
    '''

    def __init__(self, final_q, workers):

        # takes a task queue manager as the sole param:
        self._final_q = final_q
        self._workers = workers
        self._cur_worker = 0
        self._terminated = False

        super(ResultProcess, self).__init__()

    def _send_result(self, result):
        debug("sending result: %s" % (result,))
        self._final_q.put(result, block=False)
        debug("done sending result")

    def _read_worker_result(self):
        result = None
        starting_point = self._cur_worker
        while True:
            (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
            self._cur_worker += 1
            if self._cur_worker >= len(self._workers):
                self._cur_worker = 0

            try:
                if not rslt_q.empty():
                    debug("worker %d has data to read" % self._cur_worker)
                    result = rslt_q.get(block=False)
                    debug("got a result from worker %d: %s" % (self._cur_worker, result))
                    break
            except Queue.Empty:
                pass

            if self._cur_worker == starting_point:
                break

        return result

    def terminate(self):
        self._terminated = True
        super(ResultProcess, self).terminate()

    def run(self):
        '''
        The main thread execution, which reads from the results queue
        indefinitely and sends callbacks/etc. when results are received.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            try:
                result = self._read_worker_result()
                if result is None:
                    time.sleep(0.1)
                    continue

                host_name = result._host.get_name()

                # send callbacks, execute other options based on the result status
                if result.is_failed():
                    #self._callback.runner_on_failed(result._task, result)
                    self._send_result(('host_task_failed', result))
                elif result.is_unreachable():
                    #self._callback.runner_on_unreachable(result._task, result)
                    self._send_result(('host_unreachable', result))
                elif result.is_skipped():
                    #self._callback.runner_on_skipped(result._task, result)
                    self._send_result(('host_task_skipped', result))
                else:
                    #self._callback.runner_on_ok(result._task, result)
                    self._send_result(('host_task_ok', result))

                # if this task is notifying a handler, do it now
                if result._task.notify:
                    # The shared dictionary for notified handlers is a proxy, which
                    # does not detect when sub-objects within the proxy are modified.
                    # So, per the docs, we reassign the list so the proxy picks up and
                    # notifies all other threads
                    for notify in result._task.notify:
                        self._send_result(('notify_handler', notify, result._host))

                # if this task is registering facts, do that now
                if 'ansible_facts' in result._result:
                    if result._task.action in ('set_fact', 'include_vars'):
                        for (key, value) in result._result['ansible_facts'].iteritems():
                            self._send_result(('set_host_var', result._host, key, value))
                    else:
                        self._send_result(('set_host_facts', result._host, result._result['ansible_facts']))

                # if this task is registering a result, do it now
                if result._task.register:
                    self._send_result(('set_host_var', result._host, result._task.register, result._result))

            except Queue.Empty:
                pass
            except (KeyboardInterrupt, IOError, EOFError):
                break
            except:
                # FIXME: we should probably send a proper callback here instead of
                #        simply dumping a stack trace on the screen
                traceback.print_exc()
                break

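The round-robin scan in _read_worker_result is worth isolating: start at the current worker, wrap around, and give up after one full pass if no queue had data. A self-contained sketch of the same idea, with plain lists standing in for the per-worker result queues:

def read_round_robin(queues, start):
    # scan queues starting at 'start'; return the first available item
    # and the next cursor position, or (None, cursor) after a full pass
    cur = start
    while True:
        q = queues[cur]
        cur = (cur + 1) % len(queues)
        if q:
            return q.pop(0), cur
        if cur == start:
            return None, cur

result, nxt = read_round_robin([[], ['r1'], []], 0)   # -> ('r1', 2)
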
v2/ansible/executor/process/worker.py (new file, 141 lines)

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import Queue
import multiprocessing
import os
import signal
import sys
import time
import traceback

HAS_ATFORK=True
try:
    from Crypto.Random import atfork
except ImportError:
    HAS_ATFORK=False

from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task

from ansible.utils.debug import debug

__all__ = ['ExecutorProcess']


class WorkerProcess(multiprocessing.Process):
    '''
    The worker thread class, which uses TaskExecutor to run tasks
    read from a job queue and pushes results into a results queue
    for reading later.
    '''

    def __init__(self, tqm, main_q, rslt_q, loader, new_stdin):

        # takes a task queue manager as the sole param:
        self._main_q = main_q
        self._rslt_q = rslt_q
        self._loader = loader

        # dupe stdin, if we have one
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._new_stdin = new_stdin
        if not new_stdin and fileno is not None:
            try:
                self._new_stdin = os.fdopen(os.dup(fileno))
            except OSError, e:
                # couldn't dupe stdin, most likely because it's
                # not a valid file descriptor, so we just rely on
                # using the one that was passed in
                pass

        super(WorkerProcess, self).__init__()

    def run(self):
        '''
        Called when the process is started, and loops indefinitely
        until an error is encountered (typically an IOerror from the
        queue pipe being disconnected). During the loop, we attempt
        to pull tasks off the job queue and run them, pushing the result
        onto the results queue. We also remove the host from the blocked
        hosts list, to signify that they are ready for their next task.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            task = None
            try:
                if not self._main_q.empty():
                    debug("there's work to be done!")
                    (host, task, job_vars, connection_info) = self._main_q.get(block=False)
                    debug("got a task/handler to work on: %s" % task)

                    new_connection_info = connection_info.set_task_override(task)

                    # execute the task and build a TaskResult from the result
                    debug("running TaskExecutor() for %s/%s" % (host, task))
                    executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._loader).run()
                    debug("done running TaskExecutor() for %s/%s" % (host, task))
                    task_result = TaskResult(host, task, executor_result)

                    # put the result on the result queue
                    debug("sending task result")
                    self._rslt_q.put(task_result, block=False)
                    debug("done sending task result")

                else:
                    time.sleep(0.1)

            except Queue.Empty:
                pass
            except (IOError, EOFError, KeyboardInterrupt):
                break
            except AnsibleConnectionFailure:
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(unreachable=True))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break
            except Exception, e:
                debug("WORKER EXCEPTION: %s" % e)
                debug("WORKER EXCEPTION: %s" % traceback.format_exc())
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(failed=True, exception=True, stdout=traceback.format_exc()))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break

        debug("WORKER PROCESS EXITING")

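Why the stdin dance in __init__: each forked worker needs its own stdin file object, otherwise an interactive prompt (e.g. sudo asking for a password) in one fork could steal input meant for another. A minimal sketch of the same idiom in isolation (Python 2, matching this tree):

import os, sys

try:
    # duplicate the descriptor so this process owns an independent handle
    new_stdin = os.fdopen(os.dup(sys.stdin.fileno()))
except (ValueError, OSError):
    # stdin is not a real file descriptor (e.g. running under a pipe),
    # so fall back to whatever handle the parent passed in
    new_stdin = None
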
@ -19,14 +19,196 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import time

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.connection_info import ConnectionInformation
from ansible.plugins import lookup_loader, connection_loader, action_loader

from ansible.utils.debug import debug

__all__ = ['TaskExecutor']


class TaskExecutor:

    '''
    This is the main worker class for the executor pipeline, which
    handles loading an action plugin to actually dispatch the task to
    a given host. This class roughly corresponds to the old Runner()
    class.
    '''

    def __init__(self, host, task, job_vars, connection_info, loader):
        self._host = host
        self._task = task
        self._job_vars = job_vars
        self._connection_info = connection_info
        self._loader = loader

    def run(self):
        '''
        The main executor entrypoint, where we determine if the specified
        task requires looping and either runs the task with the loop items
        or calls self._execute() directly.
        '''

        debug("in run()")
        items = self._get_loop_items()
        if items:
            if len(items) > 0:
                item_results = self._run_loop(items)
                res = dict(results=item_results)
            else:
                res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
        else:
            debug("calling self._execute()")
            res = self._execute()
            debug("_execute() done")

        debug("dumping result to json")
        result = json.dumps(res)
        debug("done dumping result, returning")
        return result

    def _get_loop_items(self):
        '''
        Loads a lookup plugin to handle the with_* portion of a task (if specified),
        and returns the items result.
        '''

        items = None
        if self._task.loop and self._task.loop in lookup_loader:
            items = lookup_loader.get(self._task.loop).run(self._task.loop_args)

        return items

    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # FIXME: squash items into a flat list here for those modules
        #        which support it (yum, apt, etc.) but make it smarter
        #        than it is today?

        for item in items:
            res = self._execute()
            res['item'] = item
            results.append(res)

        return results

    def _execute(self):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        connection = self._get_connection()
        handler = self._get_action_handler(connection=connection)

        # check to see if this task should be skipped, due to it being a member of a
        # role which has already run (and whether that role allows duplicate execution)
        if self._task._role and self._task._role.has_run():
            # If there is no metadata, the default behavior is to not allow duplicates,
            # if there is metadata, check to see if the allow_duplicates flag was set to true
            if self._task._role._metadata is None or self._task._role._metadata and not self._task._role._metadata.allow_duplicates:
                debug("task belongs to a role which has already run, but does not allow duplicate execution")
                return dict(skipped=True, skip_reason='This role has already been run, but does not allow duplicates')

        if not self._task.evaluate_conditional(self._job_vars):
            debug("when evaluation failed, skipping this task")
            return dict(skipped=True, skip_reason='Conditional check failed')

        if not self._task.evaluate_tags(self._connection_info.only_tags, self._connection_info.skip_tags):
            debug("Tags don't match, skipping this task")
            return dict(skipped=True, skip_reason='Skipped due to specified tags')

        retries = self._task.retries
        if retries <= 0:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 0

        debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                # FIXME: this should use the callback mechanism
                print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries-attempt))
                result['attempts'] = attempt + 1

            debug("running the handler")
            result = handler.run(task_vars=self._job_vars)
            debug("handler run complete")
            if self._task.until:
                # TODO: implement until logic (pseudo logic follows...)
                # if VariableManager.check_conditional(cond, extra_vars=(dict(result=result))):
                #     break
                pass
            elif 'failed' not in result and result.get('rc', 0) == 0:
                # if the result is not failed, stop trying
                break

            if attempt < retries - 1:
                time.sleep(delay)

        debug("attempt loop complete, returning result")
        return result

    def _get_connection(self):
        '''
        Reads the connection property for the host, and returns the
        correct connection object from the list of connection plugins
        '''

        # FIXME: delegate_to calculation should be done here
        # FIXME: calculation of connection params/auth stuff should be done here

        # FIXME: add all port/connection type munging here (accelerated mode,
        #        fixing up options for ssh, etc.)? and 'smart' conversion
        conn_type = self._connection_info.connection
        if conn_type == 'smart':
            conn_type = 'ssh'

        connection = connection_loader.get(conn_type, self._host, self._connection_info)
        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        connection.connect()

        return connection

    def _get_action_handler(self, connection):
        '''
        Returns the correct action plugin to handle the requested task action
        '''

        if self._task.action in action_loader:
            if self._task.async != 0:
                raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
            handler_name = self._task.action
        elif self._task.async == 0:
            handler_name = 'normal'
        else:
            handler_name = 'async'

        handler = action_loader.get(
            handler_name,
            task=self._task,
            connection=connection,
            connection_info=self._connection_info,
            loader=self._loader
        )
        if not handler:
            raise AnsibleError("the handler '%s' was not found" % handler_name)

        return handler
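The retry handling in _execute() is the subtle part: retries is clamped to at least one attempt, negative delays are clamped to zero, and the loop stops early on the first non-failed result. A standalone sketch of just that control flow, with a made-up flaky_handler standing in for handler.run():

import time

def run_with_retries(handler, retries=3, delay=1):
    # mirrors TaskExecutor._execute(): clamp the values, retry until success
    if retries <= 0:
        retries = 1
    if delay < 0:
        delay = 0
    result = None
    for attempt in range(retries):
        result = handler()
        if 'failed' not in result and result.get('rc', 0) == 0:
            break  # success: stop retrying
        if attempt < retries - 1:
            time.sleep(delay)
    return result

attempts = {'n': 0}
def flaky_handler():
    # fails on the first call, succeeds on the second
    attempts['n'] += 1
    return {'rc': 0} if attempts['n'] >= 2 else {'failed': True, 'rc': 1}

print(run_with_retries(flaky_handler, retries=3, delay=0))  # {'rc': 0}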
@@ -19,18 +19,191 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import multiprocessing
import os
import socket
import sys

from ansible.errors import AnsibleError
from ansible.executor.connection_info import ConnectionInformation
#from ansible.executor.manager import AnsibleManager
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.plugins import callback_loader, strategy_loader

from ansible.utils.debug import debug

__all__ = ['TaskQueueManager']


class TaskQueueManager:

    '''
    This class handles the multiprocessing requirements of Ansible by
    creating a pool of worker forks, a result handler fork, and a
    manager object with shared data structures/queues for coordinating
    work between all processes.

    The queue manager is responsible for loading the play strategy plugin,
    which dispatches the Play's tasks to hosts.
    '''

    def __init__(self, inventory, callback, variable_manager, loader, options):

        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options

        # a special flag to help us exit cleanly
        self._terminated = False

        # create and start the multiprocessing manager
        #self._manager = AnsibleManager()
        #self._manager.start()

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # FIXME: hard-coded the default callback plugin here, which
        #        should be configurable.
        self._callback = callback_loader.get(callback)

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._workers = []
        for i in range(self._options.forks):
            # duplicate stdin, if possible
            new_stdin = None
            if fileno is not None:
                try:
                    new_stdin = os.fdopen(os.dup(fileno))
                except OSError, e:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass

            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, self._loader, new_stdin)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()

    def _initialize_notified_handlers(self, handlers):
        '''
        Clears and initializes the shared notified handlers dict with entries
        for each handler in the play, which is an empty array that will contain
        inventory hostnames for those hosts triggering the handler.
        '''

        # Zero the dictionary first by removing any entries there.
        # Proxied dicts don't support iteritems, so we have to use keys()
        for key in self._notified_handlers.keys():
            del self._notified_handlers[key]

        # FIXME: there is a block compile helper for this...
        handler_list = []
        for handler_block in handlers:
            handler_list.extend(handler_block.compile())

        # then initialize it with the handler names from the handler list
        for handler in handler_list:
            self._notified_handlers[handler.get_name()] = []

    def run(self, play):
        '''
        Iterates over the roles/tasks in a play, using the given (or default)
        strategy for queueing tasks. The default is the linear strategy, which
        operates like classic Ansible by keeping all hosts in lock-step with
        a given task (meaning no hosts move on to the next task until all hosts
        are done with the current task).
        '''

        connection_info = ConnectionInformation(play, self._options)
        self._callback.set_connection_info(connection_info)

        # run final validation on the play now, to make sure fields are templated
        # FIXME: is this even required? Everything is validated and merged at the
        #        task level, so nothing else in the play needs to be templated
        #all_vars = self._vmw.get_vars(loader=self._dlw, play=play)
        #all_vars = self._vmw.get_vars(loader=self._loader, play=play)
        #play.post_validate(all_vars=all_vars)

        self._callback.playbook_on_play_start(play.name)

        # initialize the shared dictionary containing the notified handlers
        self._initialize_notified_handlers(play.handlers)

        # load the specified strategy (or the default linear one)
        strategy = strategy_loader.get(play.strategy, self)
        if strategy is None:
            raise AnsibleError("Invalid play strategy specified: %s" % play.strategy, obj=play._ds)

        # build the iterator
        iterator = PlayIterator(inventory=self._inventory, play=play)

        # and run the play using the strategy
        return strategy.run(iterator, connection_info)

    def cleanup(self):
        debug("RUNNING CLEANUP")

        self.terminate()

        self._final_q.close()
        self._result_prc.terminate()

        for (worker_prc, main_q, rslt_q) in self._workers:
            rslt_q.close()
            main_q.close()
            worker_prc.terminate()

    def get_inventory(self):
        return self._inventory

    def get_callback(self):
        return self._callback

    def get_variable_manager(self):
        return self._variable_manager

    def get_loader(self):
        return self._loader

    def get_server_pipe(self):
        return self._server_pipe

    def get_client_pipe(self):
        return self._client_pipe

    def get_pending_results(self):
        return self._pending_results

    def get_allow_processing(self):
        return self._allow_processing

    def get_notified_handlers(self):
        return self._notified_handlers

    def get_workers(self):
        return self._workers[:]

    def terminate(self):
        self._terminated = True
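Note the design choice in __init__ above: rather than a single shared work queue, each worker gets its own (process, main_q, rslt_q) triple, so callers can dispatch to a specific fork. A toy version of that wiring, with the worker body reduced to an echo and None used as a shutdown sentinel (both illustrative choices, not the real protocol):

import multiprocessing

def echo_worker(main_q, rslt_q):
    # each worker owns a private in/out queue pair, as in TaskQueueManager
    for item in iter(main_q.get, None):  # None acts as a shutdown sentinel
        rslt_q.put(('done', item))

workers = []
for i in range(2):
    main_q, rslt_q = multiprocessing.Queue(), multiprocessing.Queue()
    prc = multiprocessing.Process(target=echo_worker, args=(main_q, rslt_q))
    prc.start()
    workers.append((prc, main_q, rslt_q))

# dispatch to a chosen worker, then shut everything down
workers[0][1].put('task-1')
print(workers[0][2].get())  # ('done', 'task-1')
for prc, main_q, rslt_q in workers:
    main_q.put(None)
    prc.join()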
@@ -19,3 +19,39 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.parsing import DataLoader

class TaskResult:
    '''
    This class is responsible for interpreting the resulting data
    from an executed task, and provides helper methods for determining
    the result of a given task.
    '''

    def __init__(self, host, task, return_data):
        self._host = host
        self._task = task
        if isinstance(return_data, dict):
            self._result = return_data.copy()
        else:
            self._result = DataLoader().load(return_data)

    def is_changed(self):
        return self._check_key('changed')

    def is_skipped(self):
        return self._check_key('skipped')

    def is_failed(self):
        return self._check_key('failed') or self._result.get('rc', 0) != 0

    def is_unreachable(self):
        return self._check_key('unreachable')

    def _check_key(self, key):
        if 'results' in self._result:
            flag = False
            for res in self._result.get('results', []):
                flag |= res.get(key, False)
            return flag
        else:
            return self._result.get(key, False)
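One behavior of _check_key worth calling out: for a looped task, the per-item results are OR-ed together, so a single failed or changed item marks the whole task result. For example, with plain strings standing in for the real Host and Task objects and a hand-built result dict:

res = TaskResult('web1', 'some task', dict(results=[
    dict(changed=True),
    dict(failed=True),
]))
print(res.is_changed())  # True: at least one loop item changed
print(res.is_failed())   # True: at least one loop item failed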
@@ -16,397 +16,661 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

#############################################

import fnmatch
import os
import sys
import re
import stat
import subprocess

from ansible import constants as C
from ansible.errors import *

from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.plugins import vars_loader
from ansible.utils.vars import combine_vars

# FIXME: these defs need to be somewhere else
def is_executable(path):
    '''is the given path executable?'''
    return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
            or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
            or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])

class Inventory(object):
    """
    Host inventory for ansible.
    """

    #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
    #              'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
    #              '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']

    def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):

        # the host file, or script path, or list of hosts
        # if a list, inventory data will NOT be loaded
        self.host_list = host_list
        self._loader = loader
        self._variable_manager = variable_manager

        # caching to avoid repeated calculations, particularly with
        # external inventory scripts.

        self._vars_per_host = {}
        self._vars_per_group = {}
        self._hosts_cache = {}
        self._groups_list = {}
        self._pattern_cache = {}

        # to be set by calling set_playbook_basedir by playbook code
        self._playbook_basedir = None

        # the inventory object holds a list of groups
        self.groups = []

        # a list of host(names) to contain current inquiries to
        self._restriction = None
        self._also_restriction = None
        self._subset = None

        if isinstance(host_list, basestring):
            if "," in host_list:
                host_list = host_list.split(",")
                host_list = [ h for h in host_list if h and h.strip() ]

        if host_list is None:
            self.parser = None
        elif isinstance(host_list, list):
            self.parser = None
            all = Group('all')
            self.groups = [ all ]
            ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
            for x in host_list:
                m = ipv6_re.match(x)
                if m:
                    all.add_host(Host(m.groups()[0], m.groups()[1]))
                else:
                    if ":" in x:
                        tokens = x.rsplit(":", 1)
                        # if there is ':' in the address, then this is an ipv6
                        if ':' in tokens[0]:
                            all.add_host(Host(x))
                        else:
                            all.add_host(Host(tokens[0], tokens[1]))
                    else:
                        all.add_host(Host(x))
        elif os.path.exists(host_list):
            if os.path.isdir(host_list):
                # Ensure basedir is inside the directory
                self.host_list = os.path.join(self.host_list, "")
                self.parser = InventoryDirectory(filename=host_list)
                self.groups = self.parser.groups.values()
            else:
                # check to see if the specified file starts with a
                # shebang (#!/), so if an error is raised by the parser
                # class we can show a more apropos error
                shebang_present = False
                try:
                    inv_file = open(host_list)
                    first_line = inv_file.readlines()[0]
                    inv_file.close()
                    if first_line.startswith('#!'):
                        shebang_present = True
                except:
                    pass

                # FIXME: utils is_executable
                if is_executable(host_list):
                    try:
                        self.parser = InventoryScript(filename=host_list)
                        self.groups = self.parser.groups.values()
                    except:
                        if not shebang_present:
                            raise AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \
                                               "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list)
                        else:
                            raise
                else:
                    try:
                        self.parser = InventoryParser(filename=host_list)
                        self.groups = self.parser.groups.values()
                    except:
                        if shebang_present:
                            raise AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \
                                               "Perhaps you want to correct this with `chmod +x %s`?" % host_list)
                        else:
                            raise

            vars_loader.add_directory(self.basedir(), with_subdir=True)
        else:
            raise AnsibleError("Unable to find an inventory file, specify one with -i ?")

        self._vars_plugins = [ x for x in vars_loader.all(self) ]

        # FIXME: shouldn't be required, since the group/host vars file
        #        management will be done in VariableManager
        # get group vars from group_vars/ files and vars plugins
        for group in self.groups:
            # FIXME: combine_vars
            group.vars = combine_vars(group.vars, self.get_group_variables(group.name))

        # get host vars from host_vars/ files and vars plugins
        for host in self.get_hosts():
            # FIXME: combine_vars
            host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
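    # A note on the host_list shapes accepted above (illustrative calls with
    # hypothetical values; loader and variable_manager must be real objects):
    #
    #   Inventory(loader, variable_manager, host_list="web1,web2,db1")
    #       -> ad-hoc comma-separated string, parsed in-memory, no file loaded
    #   Inventory(loader, variable_manager, host_list=["web1:2222", "[fe80::1]:22"])
    #       -> explicit list; host:port entries and bracketed IPv6 are recognized
    #   Inventory(loader, variable_manager, host_list="/etc/ansible/hosts")
    #       -> a path, dispatched to InventoryDirectory, InventoryScript or
    #          InventoryParser depending on what the path points at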
    def _match(self, str, pattern_str):
        try:
            if pattern_str.startswith('~'):
                return re.search(pattern_str[1:], str)
            else:
                return fnmatch.fnmatch(str, pattern_str)
        except Exception, e:
            raise AnsibleError('invalid host pattern: %s' % pattern_str)

    def _match_list(self, items, item_attr, pattern_str):
        results = []
        try:
            if not pattern_str.startswith('~'):
                pattern = re.compile(fnmatch.translate(pattern_str))
            else:
                pattern = re.compile(pattern_str[1:])
        except Exception, e:
            raise AnsibleError('invalid host pattern: %s' % pattern_str)

        for item in items:
            if pattern.match(getattr(item, item_attr)):
                results.append(item)
        return results

    def get_hosts(self, pattern="all"):
        """
        find all host names matching a pattern string, taking into account any
        inventory restrictions or applied subsets.
        """

        # process patterns
        if isinstance(pattern, list):
            pattern = ';'.join(pattern)
        patterns = pattern.replace(";",":").split(":")
        hosts = self._get_hosts(patterns)

        # exclude hosts not in a subset, if defined
        if self._subset:
            subset = self._get_hosts(self._subset)
            hosts = [ h for h in hosts if h in subset ]

        # exclude hosts mentioned in any restriction (ex: failed hosts)
        if self._restriction is not None:
            hosts = [ h for h in hosts if h in self._restriction ]
        if self._also_restriction is not None:
            hosts = [ h for h in hosts if h in self._also_restriction ]

        return hosts

    def _get_hosts(self, patterns):
        """
        finds hosts that match a list of patterns. Handles negative
        matches as well as intersection matches.
        """

        # Host specifiers should be sorted to ensure consistent behavior
        pattern_regular = []
        pattern_intersection = []
        pattern_exclude = []
        for p in patterns:
            if p.startswith("!"):
                pattern_exclude.append(p)
            elif p.startswith("&"):
                pattern_intersection.append(p)
            elif p:
                pattern_regular.append(p)

        # if no regular pattern was given, hence only exclude and/or intersection
        # patterns were given, make that magically work
        if pattern_regular == []:
            pattern_regular = ['all']

        # when applying the host selectors, run those without the "&" or "!"
        # first, then the &s, then the !s.
        patterns = pattern_regular + pattern_intersection + pattern_exclude

        hosts = []

        for p in patterns:
            # avoid resolving a pattern that is a plain host
            if p in self._hosts_cache:
                hosts.append(self.get_host(p))
            else:
                that = self.__get_hosts(p)
                if p.startswith("!"):
                    hosts = [ h for h in hosts if h not in that ]
                elif p.startswith("&"):
                    hosts = [ h for h in hosts if h in that ]
                else:
                    to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ]
                    hosts.extend(to_append)
        return hosts

    def __get_hosts(self, pattern):
        """
        finds hosts that positively match a particular pattern. Does not
        take into account negative matches.
        """

        if pattern in self._pattern_cache:
            return self._pattern_cache[pattern]

        (name, enumeration_details) = self._enumeration_info(pattern)
        hpat = self._hosts_in_unenumerated_pattern(name)
        result = self._apply_ranges(pattern, hpat)
        self._pattern_cache[pattern] = result
        return result
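    # The ordering in _get_hosts above makes the pattern algebra independent
    # of how the patterns are written: plain patterns build the candidate set
    # first, then '&' patterns intersect it, then '!' patterns subtract from
    # it. For example (group and host names are hypothetical):
    #
    #   inv.get_hosts("webservers:dbservers:&staging:!host3")
    #   # equivalent set algebra: (webservers | dbservers) & staging - {host3}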
    def _enumeration_info(self, pattern):
        """
        returns (pattern, limits) taking a regular pattern and finding out
        which parts of it correspond to start/stop offsets. limits is
        a tuple of (start, stop) or None
        """

        # Do not parse regexes for enumeration info
        if pattern.startswith('~'):
            return (pattern, None)

        # The regex used to match on the range, which can be [x] or [x-y].
        pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
        m = pattern_re.match(pattern)
        if m:
            (target, first, last, rest) = m.groups()
            first = int(first)
            if last:
                if first < 0:
                    raise AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
                last = int(last)
            else:
                last = first
            return (target, (first, last))
        else:
            return (pattern, None)

    def _apply_ranges(self, pat, hosts):
        """
        given a pattern like foo, that matches hosts, return all of hosts;
        given a pattern like foo[0-5], where foo matches hosts, return the
        slice hosts[0:5]
        """

        # If there are no hosts to select from, just return the
        # empty set. This prevents trying to do selections on an empty set.
        # issue#6258
        if not hosts:
            return hosts

        (loose_pattern, limits) = self._enumeration_info(pat)
        if not limits:
            return hosts

        (left, right) = limits

        if left == '':
            left = 0
        if right == '':
            right = 0
        left=int(left)
        right=int(right)
        try:
            if left != right:
                return hosts[left:right]
            else:
                return [ hosts[left] ]
        except IndexError:
            raise AnsibleError("no hosts matching the pattern '%s' were found" % pat)

    def _create_implicit_localhost(self, pattern):
        new_host = Host(pattern)
        new_host.set_variable("ansible_python_interpreter", sys.executable)
        new_host.set_variable("ansible_connection", "local")
        new_host.ipv4_address = '127.0.0.1'

        ungrouped = self.get_group("ungrouped")
        if ungrouped is None:
            self.add_group(Group('ungrouped'))
            ungrouped = self.get_group('ungrouped')
            self.get_group('all').add_child_group(ungrouped)
        ungrouped.add_host(new_host)
        return new_host

    def _hosts_in_unenumerated_pattern(self, pattern):
        """ Get all host names matching the pattern """

        results = []
        hosts = []
        hostnames = set()

        # ignore any negative checks here, this is handled elsewhere
        pattern = pattern.replace("!","").replace("&", "")

        def __append_host_to_results(host):
            if host not in results and host.name not in hostnames:
                hostnames.add(host.name)
                results.append(host)

        groups = self.get_groups()
        for group in groups:
            if pattern == 'all':
                for host in group.get_hosts():
                    __append_host_to_results(host)
            else:
                if self._match(group.name, pattern):
                    for host in group.get_hosts():
                        __append_host_to_results(host)
                else:
                    matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
                    for host in matching_hosts:
                        __append_host_to_results(host)

        if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
            new_host = self._create_implicit_localhost(pattern)
            results.append(new_host)
        return results
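    # _enumeration_info and _apply_ranges together implement subscripts on
    # patterns. Note the slice semantics: 'foo[2]' selects exactly hosts[2],
    # while 'foo[0-3]' becomes hosts[0:3] and so returns three hosts, not
    # four. For example (hypothetical group name):
    #
    #   inv.get_hosts("webservers[0]")    # single host, the first match
    #   inv.get_hosts("webservers[0-3]")  # hosts[0:3] of the matching hosts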
    def clear_pattern_cache(self):
        ''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
        self._pattern_cache = {}

    def groups_for_host(self, host):
        if host in self._hosts_cache:
            return self._hosts_cache[host].get_groups()
        else:
            return []

    def groups_list(self):
        if not self._groups_list:
            groups = {}
            for g in self.groups:
                groups[g.name] = [h.name for h in g.get_hosts()]
                ancestors = g.get_ancestors()
                for a in ancestors:
                    if a.name not in groups:
                        groups[a.name] = [h.name for h in a.get_hosts()]
            self._groups_list = groups
        return self._groups_list
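    # groups_list() memoizes a plain name-to-hostnames mapping, including each
    # group's ancestors, which is cheaper to pass around than Group objects.
    # The cached value looks like this (hypothetical inventory):
    #
    #   {
    #       'all':        ['web1', 'web2', 'db1'],
    #       'webservers': ['web1', 'web2'],
    #       'dbservers':  ['db1'],
    #   }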
    def get_groups(self):
        return self.groups

    def get_host(self, hostname):
        if hostname not in self._hosts_cache:
            self._hosts_cache[hostname] = self._get_host(hostname)
        return self._hosts_cache[hostname]

    def _get_host(self, hostname):
        if hostname in ['localhost','127.0.0.1']:
            for host in self.get_group('all').get_hosts():
                if host.name in ['localhost', '127.0.0.1']:
                    return host
            return self._create_implicit_localhost(hostname)
        else:
            for group in self.groups:
                for host in group.get_hosts():
                    if hostname == host.name:
                        return host
        return None

    def get_group(self, groupname):
        for group in self.groups:
            if group.name == groupname:
                return group
        return None

    def get_group_variables(self, groupname, update_cached=False, vault_password=None):
        if groupname not in self._vars_per_group or update_cached:
            self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password)
        return self._vars_per_group[groupname]

    def _get_group_variables(self, groupname, vault_password=None):

        group = self.get_group(groupname)
        if group is None:
            raise Exception("group not found: %s" % groupname)

        vars = {}

        # plugin.get_group_vars retrieves just vars for specific group
        vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
        for updated in vars_results:
            if updated is not None:
                # FIXME: combine_vars
                vars = combine_vars(vars, updated)

        # Read group_vars/ files
        # FIXME: combine_vars
        vars = combine_vars(vars, self.get_group_vars(group))

        return vars

    def get_variables(self, hostname, update_cached=False, vault_password=None):
        # note: update_cached and vault_password are accepted for
        # compatibility with the v1 signature but are ignored here
        host = self.get_host(hostname)
        if not host:
            raise Exception("host not found: %s" % hostname)
        return host.get_variables()

    def get_host_variables(self, hostname, update_cached=False, vault_password=None):
        if hostname not in self._vars_per_host or update_cached:
            self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password)
        return self._vars_per_host[hostname]

    def _get_host_variables(self, hostname, vault_password=None):

        host = self.get_host(hostname)
        if host is None:
            raise AnsibleError("host not found: %s" % hostname)

        vars = {}

        # plugin.run retrieves all vars (also from groups) for host
        vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
        for updated in vars_results:
            if updated is not None:
                # FIXME: combine_vars
                vars = combine_vars(vars, updated)

        # plugin.get_host_vars retrieves just vars for specific host
        vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
        for updated in vars_results:
            if updated is not None:
                # FIXME: combine_vars
                vars = combine_vars(vars, updated)

        # still need to check InventoryParser per host vars
        # which actually means InventoryScript per host,
        # which is not performant
        if self.parser is not None:
            # FIXME: combine_vars
            vars = combine_vars(vars, self.parser.get_host_variables(host))

        # Read host_vars/ files
        # FIXME: combine_vars
        vars = combine_vars(vars, self.get_host_vars(host))

        return vars

    def add_group(self, group):
        if group.name not in self.groups_list():
            self.groups.append(group)
            self._groups_list = None  # invalidate internal cache
        else:
            raise AnsibleError("group already in inventory: %s" % group.name)

    def list_hosts(self, pattern="all"):
        """ return a list of hostnames for a pattern """

        result = [ h for h in self.get_hosts(pattern) ]
        if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
            result = [pattern]
        return result

    def list_groups(self):
        return sorted([ g.name for g in self.groups ], key=lambda x: x)

    def restrict_to_hosts(self, restriction):
        """
        Restrict list operations to the hosts given in restriction. This is used
        to exclude failed hosts in main playbook code, don't use this for other
        reasons.
        """
        if not isinstance(restriction, list):
            restriction = [ restriction ]
        self._restriction = restriction

    def also_restrict_to(self, restriction):
        """
        Works like restrict_to_hosts but offers an additional restriction.
        Playbooks use this to implement serial behavior.
        """
        if not isinstance(restriction, list):
            restriction = [ restriction ]
        self._also_restriction = restriction

    def subset(self, subset_pattern):
        """
        Limits inventory results to a subset of inventory that matches a given
        pattern, such as to select a given geographic or numeric slice amongst
        a previous 'hosts' selection that only selects roles, or vice versa.
        Corresponds to --limit parameter to ansible-playbook
        """
        if subset_pattern is None:
            self._subset = None
        else:
            subset_pattern = subset_pattern.replace(',',':')
            subset_pattern = subset_pattern.replace(";",":").split(":")
            results = []
            # allow Unix style @filename data
            for x in subset_pattern:
                if x.startswith("@"):
                    fd = open(x[1:])
                    results.extend(fd.read().split("\n"))
                    fd.close()
                else:
                    results.append(x)
            self._subset = results

    def remove_restriction(self):
        """ Do not restrict list operations """
        self._restriction = None

    def lift_also_restriction(self):
        """ Clears the also restriction """
        self._also_restriction = None

    def is_file(self):
        """ did inventory come from a file? """
        if not isinstance(self.host_list, basestring):
            return False
        return os.path.exists(self.host_list)

    def basedir(self):
        """ if inventory came from a file, what's the directory? """
        if not self.is_file():
            return None
        dname = os.path.dirname(self.host_list)
        if dname is None or dname == '' or dname == '.':
            cwd = os.getcwd()
            return os.path.abspath(cwd)
        return os.path.abspath(dname)

    def src(self):
        """ if inventory came from a file, what's the directory and file name? """
        if not self.is_file():
            return None
        return self.host_list

    def playbook_basedir(self):
        """ returns the directory of the current playbook """
        return self._playbook_basedir

    def set_playbook_basedir(self, dir):
        """
        sets the base directory of the playbook so inventory can use it as a
        basedir for host_ and group_vars, and other things.
        """
        # Only update things if dir is a different playbook basedir
        if dir != self._playbook_basedir:
            self._playbook_basedir = dir
            # get group vars from group_vars/ files
            for group in self.groups:
                # FIXME: combine_vars
                group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
            # get host vars from host_vars/ files
            for host in self.get_hosts():
                # FIXME: combine_vars
                host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
            # invalidate cache
            self._vars_per_host = {}
            self._vars_per_group = {}

    def get_host_vars(self, host, new_pb_basedir=False):
        """ Read host_vars/ files """
        return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir)

    def get_group_vars(self, group, new_pb_basedir=False):
        """ Read group_vars/ files """
        return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir)

    def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False):
        """
        Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
        to the inventory base directory or in the same directory as the playbook. Variables in the playbook
        dir will win over the inventory dir if files are in both.
        """

        results = {}
        scan_pass = 0
        _basedir = self.basedir()

        # look in both the inventory base directory and the playbook base directory
        # unless we do an update for a new playbook base dir
        if not new_pb_basedir:
            basedirs = [_basedir, self._playbook_basedir]
        else:
            basedirs = [self._playbook_basedir]

        for basedir in basedirs:

            # this can happen from particular API usages, particularly if not run
            # from /usr/bin/ansible-playbook
            if basedir is None:
                continue

            scan_pass = scan_pass + 1

            # it's not an error if the directory does not exist, keep moving
            if not os.path.exists(basedir):
                continue

            # save work of second scan if the directories are the same
            if _basedir == self._playbook_basedir and scan_pass != 1:
                continue

            # FIXME: these should go to VariableManager
            if group and host is None:
                # load vars in dir/group_vars/name_of_group
                base_path = os.path.join(basedir, "group_vars/%s" % group.name)
                self._variable_manager.add_group_vars_file(base_path, self._loader)
            elif host and group is None:
                # same for hostvars in dir/host_vars/name_of_host
                base_path = os.path.join(basedir, "host_vars/%s" % host.name)
                self._variable_manager.add_host_vars_file(base_path, self._loader)

        # all done, results is a dictionary of variables for this particular host.
        return results
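In this v2 port, _get_hostgroup_vars no longer loads variable files itself; it only registers candidate group_vars/ and host_vars/ paths with the variable manager, scanning the inventory basedir before the playbook basedir. A small sketch of the paths it probes (directories and names are hypothetical):

import os

# scan order mirrors _get_hostgroup_vars: inventory basedir first, then the
# playbook basedir, so playbook-adjacent vars files are registered last and,
# per the docstring above, win when both define the same variable
for basedir in ['/etc/ansible', '/home/user/playbooks']:
    print(os.path.join(basedir, "group_vars/%s" % 'webservers'))
    print(os.path.join(basedir, "host_vars/%s" % 'web1'))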
v2/ansible/inventory/dir.py (new file, 229 lines)

@@ -0,0 +1,229 @@
|
||||||
|
# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

#############################################

import os

import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible import utils
from ansible import errors

class InventoryDirectory(object):
    ''' Host inventory parser for ansible using a directory of inventories. '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        self.names = os.listdir(filename)
        self.names.sort()
        self.directory = filename
        self.parsers = []
        self.hosts = {}
        self.groups = {}

        for i in self.names:

            # Skip files that end with certain extensions or characters
            if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
                continue
            # Skip hidden files
            if i.startswith('.') and not i.startswith('./'):
                continue
            # These are things inside of an inventory basedir
            if i in ("host_vars", "group_vars", "vars_plugins"):
                continue
            fullpath = os.path.join(self.directory, i)
            if os.path.isdir(fullpath):
                parser = InventoryDirectory(filename=fullpath)
            elif utils.is_executable(fullpath):
                parser = InventoryScript(filename=fullpath)
            else:
                parser = InventoryParser(filename=fullpath)
            self.parsers.append(parser)

            # retrieve all groups and hosts from the parser and add them to
            # self; don't look at group lists yet, to avoid recursion trouble,
            # but just make sure all objects exist in self
            newgroups = parser.groups.values()
            for group in newgroups:
                for host in group.hosts:
                    self._add_host(host)
            for group in newgroups:
                self._add_group(group)

        # now check the objects lists so they contain only objects from
        # self; membership data in groups is already fine (except all &
        # ungrouped, see later), but might still reference objects not in self
        for group in self.groups.values():
            # iterate on a copy of the lists, as those lists get changed in
            # the loop
            # list with group's child group objects:
            for child in group.child_groups[:]:
                if child != self.groups[child.name]:
                    group.child_groups.remove(child)
                    group.child_groups.append(self.groups[child.name])
            # list with group's parent group objects:
            for parent in group.parent_groups[:]:
                if parent != self.groups[parent.name]:
                    group.parent_groups.remove(parent)
                    group.parent_groups.append(self.groups[parent.name])
            # list with group's host objects:
            for host in group.hosts[:]:
                if host != self.hosts[host.name]:
                    group.hosts.remove(host)
                    group.hosts.append(self.hosts[host.name])
                # also check here that the group that contains host is
                # also contained in the host's group list
                if group not in self.hosts[host.name].groups:
                    self.hosts[host.name].groups.append(group)

        # extra checks on special groups all and ungrouped
        # remove hosts from 'ungrouped' if they became member of other groups
        if 'ungrouped' in self.groups:
            ungrouped = self.groups['ungrouped']
            # loop on a copy of ungrouped hosts, as we want to change that list
            for host in ungrouped.hosts[:]:
                if len(host.groups) > 1:
                    host.groups.remove(ungrouped)
                    ungrouped.hosts.remove(host)

        # remove hosts from 'all' if they became member of other groups
        # all should only contain direct children, not grandchildren
        # direct children should have depth == 1
        if 'all' in self.groups:
            allgroup = self.groups['all']
            # loop on a copy of all's child groups, as we want to change that list
            for group in allgroup.child_groups[:]:
                # groups might once have been added to all, and later be added
                # to another group: we need to remove the link with all then
                if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
                    # real children of all have just 1 parent, all
                    # this one has more, so not a direct child of all anymore
                    group.parent_groups.remove(allgroup)
                    allgroup.child_groups.remove(group)
                elif allgroup not in group.parent_groups:
                    # this group was once added to all, but doesn't list it as
                    # a parent any more; the info in the group is the correct
                    # info
                    allgroup.child_groups.remove(group)

    def _add_group(self, group):
        """ Merge an existing group or add a new one;
            Track parent and child groups, and hosts of the new one """

        if group.name not in self.groups:
            # it's brand new, add him!
            self.groups[group.name] = group
        if self.groups[group.name] != group:
            # different object, merge
            self._merge_groups(self.groups[group.name], group)

    def _add_host(self, host):
        if host.name not in self.hosts:
            # Papa's got a brand new host
            self.hosts[host.name] = host
        if self.hosts[host.name] != host:
            # different object, merge
            self._merge_hosts(self.hosts[host.name], host)

    def _merge_groups(self, group, newgroup):
        """ Merge all of instance newgroup into group,
            update parent/child relationships
            group lists may still contain group objects that exist in self with
            the same name, but were instantiated as a different object in some
            other inventory parser; these are handled later """

        # name
        if group.name != newgroup.name:
            raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))

        # depth
        group.depth = max([group.depth, newgroup.depth])

        # hosts list (host objects are by now already added to self.hosts)
        for host in newgroup.hosts:
            grouphosts = dict([(h.name, h) for h in group.hosts])
            if host.name in grouphosts:
                # same host name but different object, merge
                self._merge_hosts(grouphosts[host.name], host)
            else:
                # new membership, add host to group from self
                # group from self will also be added again to host.groups, but
                # as different object
                group.add_host(self.hosts[host.name])
                # now remove the old object for group in host.groups
                for hostgroup in [g for g in host.groups]:
                    if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
                        self.hosts[host.name].groups.remove(hostgroup)

        # group child membership relation
        for newchild in newgroup.child_groups:
            # dict with existing child groups:
            childgroups = dict([(g.name, g) for g in group.child_groups])
            # check if child of new group is already known as a child
            if newchild.name not in childgroups:
                self.groups[group.name].add_child_group(newchild)

        # group parent membership relation
        for newparent in newgroup.parent_groups:
            # dict with existing parent groups:
            parentgroups = dict([(g.name, g) for g in group.parent_groups])
            # check if parent of new group is already known as a parent
            if newparent.name not in parentgroups:
                if newparent.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newparent.name] = newparent
                # group now exists but not yet as a parent here
                self.groups[newparent.name].add_child_group(group)

        # variables
        group.vars = utils.combine_vars(group.vars, newgroup.vars)

    def _merge_hosts(self, host, newhost):
        """ Merge all of instance newhost into host """

        # name
        if host.name != newhost.name:
            raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))

        # group membership relation
        for newgroup in newhost.groups:
            # dict with existing groups:
            hostgroups = dict([(g.name, g) for g in host.groups])
            # check if new group is already known as a group
            if newgroup.name not in hostgroups:
                if newgroup.name not in self.groups:
                    # group does not exist yet in self, import him
                    self.groups[newgroup.name] = newgroup
                # group now exists but doesn't have host yet
                self.groups[newgroup.name].add_host(host)

        # variables
        host.vars = utils.combine_vars(host.vars, newhost.vars)

    def get_host_variables(self, host):
        """ Gets additional host variables from all inventories """
        vars = {}
        for i in self.parsers:
            vars.update(i.get_host_variables(host))
        return vars
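
A hedged usage sketch of the directory parser above (the directory layout is hypothetical; note that ".ini"-suffixed files are skipped by the extension filter, so plain INI inventories should carry no extension, and scripts must have their executable bit set):

from ansible.inventory.dir import InventoryDirectory

# /etc/ansible/hosts.d/static     (plain INI inventory, no extension)
# /etc/ansible/hosts.d/cloud.py   (executable dynamic inventory script)
inv = InventoryDirectory(filename='/etc/ansible/hosts.d')
print(sorted(inv.groups))   # groups merged from both sources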

116  v2/ansible/inventory/expand_hosts.py  Normal file
@ -0,0 +1,116 @@

# (c) 2012, Zettar Inc.
# Written by Chin Fang <fangchin@zettar.com>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.
#

'''
This module is for enhancing ansible's inventory parsing capability such
that it can deal with hostnames specified using a simple pattern in the
form of [beg:end], example: [1:5], [a:c], [D:G]. If beg is not specified,
it defaults to 0.

If beg is given and is left-zero-padded, e.g. '001', it is taken as a
formatting hint when the range is expanded. e.g. [001:010] is to be
expanded into 001, 002 ...009, 010.

Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
import string

from ansible import errors

def detect_range(line = None):
    '''
    A helper function that checks a given host line to see if it contains
    a range pattern described in the docstring above.

    Returns True if the given line contains a pattern, else False.
    '''
    if 0 <= line.find("[") < line.find(":") < line.find("]"):
        return True
    else:
        return False

def expand_hostname_range(line = None):
    '''
    A helper function that expands a given line that contains a pattern
    specified in the top docstring, and returns a list that consists of the
    expanded version.

    The '[' and ']' characters are used to maintain the pseudo-code
    appearance. They are replaced in this function with '|' to ease
    string splitting.

    References: http://ansible.github.com/patterns.html#hosts-and-groups
    '''
    all_hosts = []
    if line:
        # A hostname such as db[1:6]-node is considered to consist of
        # three parts:
        # head: 'db'
        # nrange: [1:6]; range() is a built-in. Can't use the name
        # tail: '-node'

        # Add support for multiple ranges in a host so:
        # db[01:10:3]node-[01:10]
        # - to do this we split off at the first [...] set, getting the list
        #   of hosts and then repeat until none left.
        # - also add an optional third parameter which contains the step. (Default: 1)
        #   so range can be [01:10:2] -> 01 03 05 07 09
        # FIXME: make this work for alphabetic sequences too.

        (head, nrange, tail) = line.replace('[','|',1).replace(']','|',1).split('|')
        bounds = nrange.split(":")
        if len(bounds) != 2 and len(bounds) != 3:
            raise errors.AnsibleError("host range incorrectly specified")
        beg = bounds[0]
        end = bounds[1]
        if len(bounds) == 2:
            step = 1
        else:
            step = bounds[2]
        if not beg:
            beg = "0"
        if not end:
            raise errors.AnsibleError("host range end value missing")
        if beg[0] == '0' and len(beg) > 1:
            rlen = len(beg) # range length formatting hint
            if rlen != len(end):
                raise errors.AnsibleError("host range format incorrectly specified!")
            fill = lambda _: str(_).zfill(rlen) # range sequence
        else:
            fill = str

        try:
            i_beg = string.ascii_letters.index(beg)
            i_end = string.ascii_letters.index(end)
            if i_beg > i_end:
                raise errors.AnsibleError("host range format incorrectly specified!")
            seq = string.ascii_letters[i_beg:i_end+1]
        except ValueError:  # not an alpha range
            seq = range(int(beg), int(end)+1, int(step))

        for rseq in seq:
            hname = ''.join((head, fill(rseq), tail))

            if detect_range(hname):
                all_hosts.extend( expand_hostname_range( hname ) )
            else:
                all_hosts.append(hname)

        return all_hosts
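
A hedged usage sketch of the two helpers above, assuming the file is importable as ansible.inventory.expand_hosts:

from ansible.inventory.expand_hosts import detect_range, expand_hostname_range

print(detect_range("db[01:03]-node"))           # True
print(expand_hostname_range("db[01:03]-node"))  # ['db01-node', 'db02-node', 'db03-node']
print(expand_hostname_range("host[a:c]"))       # ['hosta', 'hostb', 'hostc']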

159  v2/ansible/inventory/group.py  Normal file
@ -0,0 +1,159 @@

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible.utils.debug import debug

class Group:
    ''' a group of ansible hosts '''

    #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]

    def __init__(self, name=None):

        self.depth = 0
        self.name = name
        self.hosts = []
        self.vars = {}
        self.child_groups = []
        self.parent_groups = []
        self._hosts_cache = None

        #self.clear_hosts_cache()
        #if self.name is None:
        #    raise Exception("group name is required")

    def __repr__(self):
        return self.get_name()

    def __getstate__(self):
        return self.serialize()

    def __setstate__(self, data):
        return self.deserialize(data)

    def serialize(self):
        parent_groups = []
        for parent in self.parent_groups:
            parent_groups.append(parent.serialize())

        result = dict(
            name=self.name,
            vars=self.vars.copy(),
            parent_groups=parent_groups,
            depth=self.depth,
        )

        debug("serializing group, result is: %s" % result)
        return result

    def deserialize(self, data):
        debug("deserializing group, data is: %s" % data)
        self.__init__()
        self.name = data.get('name')
        self.vars = data.get('vars', dict())

        parent_groups = data.get('parent_groups', [])
        for parent_data in parent_groups:
            g = Group()
            g.deserialize(parent_data)
            self.parent_groups.append(g)

    def get_name(self):
        return self.name

    def add_child_group(self, group):

        if self == group:
            raise Exception("can't add group to itself")

        # don't add if it's already there
        if not group in self.child_groups:
            self.child_groups.append(group)

            # update the depth of the child
            group.depth = max([self.depth+1, group.depth])

            # update the depth of the grandchildren
            group._check_children_depth()

            # now add self to child's parent_groups list, but only if there
            # isn't already a group with the same name
            if not self.name in [g.name for g in group.parent_groups]:
                group.parent_groups.append(self)

            self.clear_hosts_cache()

    def _check_children_depth(self):

        for group in self.child_groups:
            group.depth = max([self.depth+1, group.depth])
            group._check_children_depth()

    def add_host(self, host):

        self.hosts.append(host)
        host.add_group(self)
        self.clear_hosts_cache()

    def set_variable(self, key, value):

        self.vars[key] = value

    def clear_hosts_cache(self):

        self._hosts_cache = None
        for g in self.parent_groups:
            g.clear_hosts_cache()

    def get_hosts(self):

        if self._hosts_cache is None:
            self._hosts_cache = self._get_hosts()

        return self._hosts_cache

    def _get_hosts(self):

        hosts = []
        seen = {}
        for kid in self.child_groups:
            kid_hosts = kid.get_hosts()
            for kk in kid_hosts:
                if kk not in seen:
                    seen[kk] = 1
                    hosts.append(kk)
        for mine in self.hosts:
            if mine not in seen:
                seen[mine] = 1
                hosts.append(mine)
        return hosts

    def get_vars(self):
        return self.vars.copy()

    def _get_ancestors(self):

        results = {}
        for g in self.parent_groups:
            results[g.name] = g
            results.update(g._get_ancestors())
        return results

    def get_ancestors(self):

        return self._get_ancestors().values()
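
A hedged sketch of the parent/child and depth bookkeeping above (assumes the v2 package is importable; shadowing the built-in name `all` follows the style of the inventory parsers in this commit):

from ansible.inventory.group import Group

all = Group(name='all')
web = Group(name='web')
all.add_child_group(web)

print(web.depth)           # 1, bumped by add_child_group
print(web.parent_groups)   # [all]
print(all.child_groups)    # [web]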

127  v2/ansible/inventory/host.py  Normal file
@ -0,0 +1,127 @@

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible import constants as C
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars

__all__ = ['Host']

class Host:
    ''' a single ansible host '''

    #__slots__ = [ 'name', 'vars', 'groups' ]

    def __getstate__(self):
        return self.serialize()

    def __setstate__(self, data):
        return self.deserialize(data)

    def serialize(self):
        groups = []
        for group in self.groups:
            groups.append(group.serialize())

        return dict(
            name=self.name,
            vars=self.vars.copy(),
            ipv4_address=self.ipv4_address,
            ipv6_address=self.ipv6_address,
            port=self.port,
            gathered_facts=self._gathered_facts,
            groups=groups,
        )

    def deserialize(self, data):
        self.__init__()

        self.name = data.get('name')
        self.vars = data.get('vars', dict())
        self.ipv4_address = data.get('ipv4_address', '')
        self.ipv6_address = data.get('ipv6_address', '')
        self.port = data.get('port')

        groups = data.get('groups', [])
        for group_data in groups:
            g = Group()
            g.deserialize(group_data)
            self.groups.append(g)

    def __init__(self, name=None, port=None):

        self.name = name
        self.vars = {}
        self.groups = []

        self.ipv4_address = name
        self.ipv6_address = name

        if port and port != C.DEFAULT_REMOTE_PORT:
            self.port = int(port)
        else:
            self.port = C.DEFAULT_REMOTE_PORT

        self._gathered_facts = False

    def __repr__(self):
        return self.get_name()

    def get_name(self):
        return self.name

    @property
    def gathered_facts(self):
        return self._gathered_facts

    def set_gathered_facts(self, gathered):
        self._gathered_facts = gathered

    def add_group(self, group):

        self.groups.append(group)

    def set_variable(self, key, value):

        self.vars[key] = value

    def get_groups(self):

        groups = {}
        for g in self.groups:
            groups[g.name] = g
            ancestors = g.get_ancestors()
            for a in ancestors:
                groups[a.name] = a
        return groups.values()

    def get_vars(self):

        results = {}
        groups = self.get_groups()
        for group in sorted(groups, key=lambda g: g.depth):
            results = combine_vars(results, group.get_vars())
        results = combine_vars(results, self.vars)
        results['inventory_hostname'] = self.name
        results['inventory_hostname_short'] = self.name.split('.')[0]
        results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
        return results
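
A hedged sketch of how get_vars() layers group vars (shallowest group first) under host vars, assuming the v2 package is importable:

from ansible.inventory.group import Group
from ansible.inventory.host import Host

g = Group(name='web')
g.set_variable('http_port', 80)
h = Host(name='web1.example.com')
g.add_host(h)
h.set_variable('http_port', 8080)   # host vars win over group vars

vars = h.get_vars()
print(vars['http_port'])                  # 8080
print(vars['inventory_hostname_short'])   # 'web1'
print(vars['group_names'])                # ['web']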

215  v2/ansible/inventory/ini.py  Normal file
@ -0,0 +1,215 @@

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

#############################################

import ast
import shlex
import re

from ansible import constants as C
from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range

class InventoryParser(object):
    """
    Host inventory for ansible.
    """

    def __init__(self, filename=C.DEFAULT_HOST_LIST):

        with open(filename) as fh:
            self.lines = fh.readlines()
            self.groups = {}
            self.hosts = {}
            self._parse()

    def _parse(self):

        self._parse_base_groups()
        self._parse_group_children()
        self._add_allgroup_children()
        self._parse_group_variables()
        return self.groups

    @staticmethod
    def _parse_value(v):
        if "#" not in v:
            try:
                return ast.literal_eval(v)
            # Using explicit exceptions.
            # Likely a string that literal_eval does not like. We will then just set it.
            except ValueError:
                # For some reason this was thought to be malformed.
                pass
            except SyntaxError:
                # Is this a hash with an equals at the end?
                pass
        return v

    # [webservers]
    # alpha
    # beta:2345
    # gamma sudo=True user=root
    # delta asdf=jkl favcolor=red

    def _add_allgroup_children(self):

        for group in self.groups.values():
            if group.depth == 0 and group.name != 'all':
                self.groups['all'].add_child_group(group)

    def _parse_base_groups(self):
        # FIXME: refactor

        ungrouped = Group(name='ungrouped')
        all = Group(name='all')
        all.add_child_group(ungrouped)

        self.groups = dict(all=all, ungrouped=ungrouped)
        active_group_name = 'ungrouped'

        for line in self.lines:
            line = self._before_comment(line).strip()
            if line.startswith("[") and line.endswith("]"):
                active_group_name = line.replace("[","").replace("]","")
                if ":vars" in line or ":children" in line:
                    active_group_name = active_group_name.rsplit(":", 1)[0]
                    if active_group_name not in self.groups:
                        new_group = self.groups[active_group_name] = Group(name=active_group_name)
                    active_group_name = None
                elif active_group_name not in self.groups:
                    new_group = self.groups[active_group_name] = Group(name=active_group_name)
            elif line.startswith(";") or line == '':
                pass
            elif active_group_name:
                tokens = shlex.split(line)
                if len(tokens) == 0:
                    continue
                hostname = tokens[0]
                port = C.DEFAULT_REMOTE_PORT
                # Three cases to check:
                # 0. A hostname that contains a range pseudo-code and a port
                # 1. A hostname that contains just a port
                if hostname.count(":") > 1:
                    # Possibly an IPv6 address, or maybe a host line with multiple ranges
                    # IPv6 with Port  XXX:XXX::XXX.port
                    # FQDN            foo.example.com
                    if hostname.count(".") == 1:
                        (hostname, port) = hostname.rsplit(".", 1)
                elif ("[" in hostname and
                    "]" in hostname and
                    ":" in hostname and
                    (hostname.rindex("]") < hostname.rindex(":")) or
                    ("]" not in hostname and ":" in hostname)):
                    (hostname, port) = hostname.rsplit(":", 1)

                hostnames = []
                if detect_range(hostname):
                    hostnames = expand_hostname_range(hostname)
                else:
                    hostnames = [hostname]

                for hn in hostnames:
                    host = None
                    if hn in self.hosts:
                        host = self.hosts[hn]
                    else:
                        host = Host(name=hn, port=port)
                        self.hosts[hn] = host
                    if len(tokens) > 1:
                        for t in tokens[1:]:
                            if t.startswith('#'):
                                break
                            try:
                                (k,v) = t.split("=", 1)
                            except ValueError, e:
                                raise AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
                            if k == 'ansible_ssh_host':
                                host.ipv4_address = self._parse_value(v)
                            else:
                                host.set_variable(k, self._parse_value(v))
                    self.groups[active_group_name].add_host(host)

    # [southeast:children]
    # atlanta
    # raleigh

    def _parse_group_children(self):
        group = None

        for line in self.lines:
            line = line.strip()
            if line is None or line == '':
                continue
            if line.startswith("[") and ":children]" in line:
                line = line.replace("[","").replace(":children]","")
                group = self.groups.get(line, None)
                if group is None:
                    group = self.groups[line] = Group(name=line)
            elif line.startswith("#") or line.startswith(";"):
                pass
            elif line.startswith("["):
                group = None
            elif group:
                kid_group = self.groups.get(line, None)
                if kid_group is None:
                    raise AnsibleError("child group is not defined: (%s)" % line)
                else:
                    group.add_child_group(kid_group)

    # [webservers:vars]
    # http_port=1234
    # maxRequestsPerChild=200

    def _parse_group_variables(self):
        group = None
        for line in self.lines:
            line = line.strip()
            if line.startswith("[") and ":vars]" in line:
                line = line.replace("[","").replace(":vars]","")
                group = self.groups.get(line, None)
                if group is None:
                    raise AnsibleError("can't add vars to undefined group: %s" % line)
            elif line.startswith("#") or line.startswith(";"):
                pass
            elif line.startswith("["):
                group = None
            elif line == '':
                pass
            elif group:
                if "=" not in line:
                    raise AnsibleError("variables assigned to group must be in key=value form")
                else:
                    (k, v) = [e.strip() for e in line.split("=", 1)]
                    group.set_variable(k, self._parse_value(v))

    def get_host_variables(self, host):
        return {}

    def _before_comment(self, msg):
        ''' what's the part of a string before a comment? '''
        msg = msg.replace("\#","**NOT_A_COMMENT**")
        msg = msg.split("#")[0]
        msg = msg.replace("**NOT_A_COMMENT**","#")
        return msg
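
A hedged usage sketch of the INI parser above (the file path and contents are hypothetical):

# Given /tmp/hosts.ini containing:
#
#   [webservers]
#   alpha
#   web[01:02].example.com
#
#   [webservers:vars]
#   http_port=8080
#
from ansible.inventory.ini import InventoryParser

parser = InventoryParser(filename='/tmp/hosts.ini')
print(sorted(parser.hosts))    # ['alpha', 'web01.example.com', 'web02.example.com']
print(sorted(parser.groups))   # ['all', 'ungrouped', 'webservers']
print(parser.groups['webservers'].get_vars())   # {'http_port': 8080}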

150  v2/ansible/inventory/script.py  Normal file
@ -0,0 +1,150 @@

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

#############################################

import os
import subprocess
import sys

import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_unicode_to_bytes
from ansible import utils
from ansible import errors


class InventoryScript(object):
    ''' Host inventory parser for ansible using external inventory scripts. '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):

        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)
        cmd = [ self.filename, "--list" ]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()
        self.data = stdout
        # see comment about _meta below
        self.host_vars_from_top = None
        self.groups = self._parse(stderr)

    def _parse(self, err):

        all_hosts = {}

        # not passing from_remote because data from CMDB is trusted
        self.raw = utils.parse_json(self.data)
        self.raw = json_dict_unicode_to_bytes(self.raw)

        all = Group('all')
        groups = dict(all=all)
        group = None

        if 'failed' in self.raw:
            sys.stderr.write(err + "\n")
            raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)

        for (group_name, data) in self.raw.items():

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host.  This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.

            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue

            if group_name != all.name:
                group = groups[group_name] = Group(group_name)
            else:
                group = all
            host = None

            if not isinstance(data, dict):
                data = {'hosts': data}
            # if it does not have those subkeys, it is the simplified syntax: a host with vars
            elif not any(k in data for k in ('hosts','vars')):
                data = {'hosts': [group_name], 'vars': data}

            if 'hosts' in data:
                if not isinstance(data['hosts'], list):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for the host list:\n %s" % (group_name, data))

                for hostname in data['hosts']:
                    if not hostname in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)

            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for variables:\n %s" % (group_name, data))

                for k, v in data['vars'].iteritems():
                    if group.name == all.name:
                        all.set_variable(k, v)
                    else:
                        group.set_variable(k, v)

        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in groups:
                        groups[group_name].add_child_group(groups[child_name])

        for group in groups.values():
            if group.depth == 0 and group.name != 'all':
                all.add_child_group(group)

        return groups

    def get_host_variables(self, host):
        """ Runs <script> --host <hostname> to determine additional host variables """
        if self.host_vars_from_top is not None:
            got = self.host_vars_from_top.get(host.name, {})
            return got

        cmd = [self.filename, "--host", host.name]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (out, err) = sp.communicate()
        if out.strip() == '':
            return dict()
        try:
            return json_dict_unicode_to_bytes(utils.parse_json(out))
        except ValueError:
            raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))

0   v2/ansible/inventory/vars_plugins/__init__.py  Normal file
48  v2/ansible/inventory/vars_plugins/noop.py  Normal file
@ -0,0 +1,48 @@

# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

class VarsModule(object):

    """
    Loads variables for groups and/or hosts
    """

    def __init__(self, inventory):

        """ constructor """

        self.inventory = inventory
        self.inventory_basedir = inventory.basedir()

    def run(self, host, vault_password=None):
        """ For backwards compatibility, when only vars per host were retrieved
            This method should return both host specific vars as well as vars
            calculated from groups it is a member of """
        return {}

    def get_host_vars(self, host, vault_password=None):
        """ Get host specific variables. """
        return {}

    def get_group_vars(self, group, vault_password=None):
        """ Get group specific variables. """
        return {}
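
A third-party vars plugin only needs to follow the same surface as the noop plugin above; a hedged sketch of a minimal custom plugin (the returned keys are hypothetical):

class VarsModule(object):
    def __init__(self, inventory):
        self.inventory = inventory

    def get_host_vars(self, host, vault_password=None):
        # inject a variable for every host
        return {'managed_by': 'ansible'}

    def get_group_vars(self, group, vault_password=None):
        return {}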

17  v2/ansible/module_utils/__init__.py  Normal file
@ -0,0 +1,17 @@

# 2013, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

103  v2/ansible/module_utils/a10.py  Normal file
@ -0,0 +1,103 @@

# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

AXAPI_PORT_PROTOCOLS = {
    'tcp': 2,
    'udp': 3,
}

AXAPI_VPORT_PROTOCOLS = {
    'tcp': 2,
    'udp': 3,
    'fast-http': 9,
    'http': 11,
    'https': 12,
}

def a10_argument_spec():
    return dict(
        host=dict(type='str', required=True),
        username=dict(type='str', aliases=['user', 'admin'], required=True),
        password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
        write_config=dict(type='bool', default=False)
    )

def axapi_failure(result):
    if 'response' in result and result['response'].get('status') == 'fail':
        return True
    return False

def axapi_call(module, url, post=None):
    '''
    Returns a datastructure based on the result of the API call
    '''
    rsp, info = fetch_url(module, url, data=post)
    if not rsp or info['status'] >= 400:
        module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
    try:
        raw_data = rsp.read()
        data = json.loads(raw_data)
    except ValueError:
        # at least one API call (system.action.write_config) returns
        # XML even when JSON is requested, so do some minimal handling
        # here to prevent failing even when the call succeeded
        if 'status="ok"' in raw_data.lower():
            data = {"response": {"status": "OK"}}
        else:
            data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
    except:
        module.fail_json(msg="could not read the result from the host")
    finally:
        rsp.close()
    return data

def axapi_authenticate(module, base_url, username, password):
    url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
    result = axapi_call(module, url)
    if axapi_failure(result):
        return module.fail_json(msg=result['response']['err']['msg'])
    sessid = result['session_id']
    return base_url + '&session_id=' + sessid

def axapi_enabled_disabled(flag):
    '''
    The axapi uses 0/1 integer values for flags, rather than strings
    or booleans, so convert the given flag to a 0 or 1. For now, params
    are specified as strings only so that's what we check.
    '''
    if flag == 'enabled':
        return 1
    else:
        return 0

def axapi_get_port_protocol(protocol):
    return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)

def axapi_get_vport_protocol(protocol):
    return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
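
A hedged sketch of the pure helper functions above (the network-facing calls, which need a module object and fetch_url, are omitted):

print(axapi_enabled_disabled('enabled'))                # 1
print(axapi_get_port_protocol('TCP'))                   # 2
print(axapi_get_vport_protocol('fast-http'))            # 9
print(axapi_failure({'response': {'status': 'fail'}}))  # True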

1556  v2/ansible/module_utils/basic.py  Normal file  (file diff suppressed because it is too large)
194   v2/ansible/module_utils/ec2.py  Normal file
@ -0,0 +1,194 @@

# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

try:
    from distutils.version import LooseVersion
    HAS_LOOSE_VERSION = True
except:
    HAS_LOOSE_VERSION = False

AWS_REGIONS = [
    'ap-northeast-1',
    'ap-southeast-1',
    'ap-southeast-2',
    'cn-north-1',
    'eu-central-1',
    'eu-west-1',
    'sa-east-1',
    'us-east-1',
    'us-west-1',
    'us-west-2',
    'us-gov-west-1',
]


def aws_common_argument_spec():
    return dict(
        ec2_url=dict(),
        aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
        aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
        validate_certs=dict(default=True, type='bool'),
        security_token=dict(no_log=True),
        profile=dict(),
    )


def ec2_argument_spec():
    spec = aws_common_argument_spec()
    spec.update(
        dict(
            region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
        )
    )
    return spec


def boto_supports_profile_name():
    return hasattr(boto.ec2.EC2Connection, 'profile_name')


def get_aws_connection_info(module):

    # Check module args for credentials, then check environment vars
    # access_key

    ec2_url = module.params.get('ec2_url')
    access_key = module.params.get('aws_access_key')
    secret_key = module.params.get('aws_secret_key')
    security_token = module.params.get('security_token')
    region = module.params.get('region')
    profile_name = module.params.get('profile')
    validate_certs = module.params.get('validate_certs')

    if not ec2_url:
        if 'EC2_URL' in os.environ:
            ec2_url = os.environ['EC2_URL']
        elif 'AWS_URL' in os.environ:
            ec2_url = os.environ['AWS_URL']

    if not access_key:
        if 'EC2_ACCESS_KEY' in os.environ:
            access_key = os.environ['EC2_ACCESS_KEY']
        elif 'AWS_ACCESS_KEY_ID' in os.environ:
            access_key = os.environ['AWS_ACCESS_KEY_ID']
        elif 'AWS_ACCESS_KEY' in os.environ:
            access_key = os.environ['AWS_ACCESS_KEY']
        else:
            # in case access_key came in as empty string
            access_key = None

    if not secret_key:
        if 'EC2_SECRET_KEY' in os.environ:
            secret_key = os.environ['EC2_SECRET_KEY']
        elif 'AWS_SECRET_ACCESS_KEY' in os.environ:
            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        elif 'AWS_SECRET_KEY' in os.environ:
            secret_key = os.environ['AWS_SECRET_KEY']
        else:
            # in case secret_key came in as empty string
            secret_key = None

    if not region:
        if 'EC2_REGION' in os.environ:
            region = os.environ['EC2_REGION']
        elif 'AWS_REGION' in os.environ:
            region = os.environ['AWS_REGION']
        else:
            # boto.config.get returns None if config not found
            region = boto.config.get('Boto', 'aws_region')
            if not region:
                region = boto.config.get('Boto', 'ec2_region')

    if not security_token:
        if 'AWS_SECURITY_TOKEN' in os.environ:
            security_token = os.environ['AWS_SECURITY_TOKEN']
        else:
            # in case security_token came in as empty string
            security_token = None

    boto_params = dict(aws_access_key_id=access_key,
                       aws_secret_access_key=secret_key,
                       security_token=security_token)

    # profile_name only works as a key in boto >= 2.24
    # so only set profile_name if passed as an argument
    if profile_name:
        if not boto_supports_profile_name():
            module.fail_json("boto does not support profile_name before 2.24")
        boto_params['profile_name'] = profile_name

    if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
        boto_params['validate_certs'] = validate_certs

    return region, ec2_url, boto_params


def get_ec2_creds(module):
    ''' for compatibility mode with old modules that don't/can't yet
        use ec2_connect method '''
    region, ec2_url, boto_params = get_aws_connection_info(module)
    return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region


def boto_fix_security_token_in_profile(conn, profile_name):
    ''' monkey patch for boto issue boto/boto#2100 '''
    profile = 'profile ' + profile_name
    if boto.config.has_option(profile, 'aws_security_token'):
        conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
    return conn


def connect_to_aws(aws_module, region, **params):
    conn = aws_module.connect_to_region(region, **params)
    if params.get('profile_name'):
        conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
    return conn


def ec2_connect(module):

    """ Return an ec2 connection"""

    region, ec2_url, boto_params = get_aws_connection_info(module)

    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg=str(e))
    # Otherwise, no region so we fallback to the old connection method
    elif ec2_url:
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="Either region or ec2_url must be specified")

    return ec2
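
A hedged sketch of how a module typically wires these helpers together (module boilerplate simplified; the instance_id option is hypothetical, and boto as well as the os import must be provided by the consuming module environment):

from ansible.module_utils.basic import AnsibleModule

argument_spec = ec2_argument_spec()
argument_spec.update(dict(instance_id=dict(type='str')))   # hypothetical module option
module = AnsibleModule(argument_spec=argument_spec)
ec2 = ec2_connect(module)   # resolves region/credentials, or exits via fail_json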
|

v2/ansible/module_utils/facts.py (new file, 2451 lines)
File diff suppressed because it is too large

v2/ansible/module_utils/gce.py (new file, 87 lines)
@@ -0,0 +1,87 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import pprint

USER_AGENT_PRODUCT="Ansible-gce"
USER_AGENT_VERSION="v1"

def gce_connect(module):
    """Return a Google Cloud Engine connection."""
    service_account_email = module.params.get('service_account_email', None)
    pem_file = module.params.get('pem_file', None)
    project_id = module.params.get('project_id', None)

    # If any of the values are not given as parameters, check the appropriate
    # environment variables.
    if not service_account_email:
        service_account_email = os.environ.get('GCE_EMAIL', None)
    if not project_id:
        project_id = os.environ.get('GCE_PROJECT', None)
    if not pem_file:
        pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)

    # If we still don't have one or more of our credentials, attempt to
    # get the remaining values from the libcloud secrets file.
    if service_account_email is None or pem_file is None:
        try:
            import secrets
        except ImportError:
            secrets = None

        if hasattr(secrets, 'GCE_PARAMS'):
            if not service_account_email:
                service_account_email = secrets.GCE_PARAMS[0]
            if not pem_file:
                pem_file = secrets.GCE_PARAMS[1]
        keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
        if not project_id:
            project_id = keyword_params.get('project', None)

    # If we *still* don't have the credentials we need, then it's time to
    # just fail out.
    if service_account_email is None or pem_file is None or project_id is None:
        module.fail_json(msg='Missing GCE connection parameters in libcloud '
                             'secrets file.')
        return None

    try:
        gce = get_driver(Provider.GCE)(service_account_email, pem_file, datacenter=module.params.get('zone'), project=project_id)
        gce.connection.user_agent_append("%s/%s" % (
            USER_AGENT_PRODUCT, USER_AGENT_VERSION))
    except (RuntimeError, ValueError), e:
        module.fail_json(msg=str(e), changed=False)
    except Exception, e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    return gce

def unexpected_error_msg(error):
    """Create an error string based on passed in error."""
    return 'Unexpected response: ' + pprint.pformat(vars(error))
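
A sketch of the expected call pattern. This snippet deliberately leaves the libcloud and os imports to the embedding module, so those imports and the parameter names below are assumptions for illustration:

# Hypothetical consuming module.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

module = AnsibleModule(argument_spec=dict(
    service_account_email=dict(),
    pem_file=dict(),
    project_id=dict(),
    zone=dict(default='us-central1-a'),
))
gce = gce_connect(module)   # falls back to GCE_* env vars, then secrets.py
module.exit_json(changed=False, nodes=[n.name for n in gce.list_nodes()])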

v2/ansible/module_utils/known_hosts.py (new file, 176 lines)
@@ -0,0 +1,176 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import hmac
import urlparse

try:
    from hashlib import sha1
except ImportError:
    import sha as sha1

HASHED_KEY_MAGIC = "|1|"

def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):

    """ idempotently add a git url hostkey """

    fqdn = get_fqdn(module.params['repo'])

    if fqdn:
        known_host = check_hostkey(module, fqdn)
        if not known_host:
            if accept_hostkey:
                rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
                if rc != 0:
                    module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
            else:
                module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)

def get_fqdn(repo_url):

    """ chop the hostname out of a giturl """

    result = None
    if "@" in repo_url and "://" not in repo_url:
        # most likely a git@ or ssh+git@ type URL
        repo_url = repo_url.split("@", 1)[1]
        if ":" in repo_url:
            repo_url = repo_url.split(":")[0]
            result = repo_url
        elif "/" in repo_url:
            repo_url = repo_url.split("/")[0]
            result = repo_url
    elif "://" in repo_url:
        # this should be something we can parse with urlparse
        parts = urlparse.urlparse(repo_url)
        if 'ssh' not in parts[0] and 'git' not in parts[0]:
            # don't try and scan a hostname that's not ssh
            return None
        # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
        # ensure we actually have a parts[1] before continuing.
        if parts[1] != '':
            result = parts[1]
            if ":" in result:
                result = result.split(":")[0]
            if "@" in result:
                result = result.split("@", 1)[1]

    return result

def check_hostkey(module, fqdn):
    return not not_in_host_file(module, fqdn)

# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.

def not_in_host_file(self, host):

    if 'USER' in os.environ:
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_host_file = "~/.ssh/known_hosts"
    user_host_file = os.path.expanduser(user_host_file)

    host_file_list = []
    host_file_list.append(user_host_file)
    host_file_list.append("/etc/ssh/ssh_known_hosts")
    host_file_list.append("/etc/ssh/ssh_known_hosts2")

    hfiles_not_found = 0
    for hf in host_file_list:
        if not os.path.exists(hf):
            hfiles_not_found += 1
            continue

        try:
            host_fh = open(hf)
        except IOError, e:
            hfiles_not_found += 1
            continue
        else:
            data = host_fh.read()
            host_fh.close()

        for line in data.split("\n"):
            if line is None or " " not in line:
                continue
            tokens = line.split()
            if tokens[0].find(HASHED_KEY_MAGIC) == 0:
                # this is a hashed known host entry
                try:
                    (kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
                    hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
                    hash.update(host)
                    if hash.digest() == kn_host.decode('base64'):
                        return False
                except:
                    # invalid hashed host key, skip it
                    continue
            else:
                # standard host file entry
                if host in tokens[0]:
                    return False

    return True


def add_host_key(module, fqdn, key_type="rsa", create_dir=False):

    """ use ssh-keyscan to add the hostkey """

    result = False
    keyscan_cmd = module.get_bin_path('ssh-keyscan', True)

    if 'USER' in os.environ:
        user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
        user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
    else:
        user_ssh_dir = "~/.ssh/"
        user_host_file = "~/.ssh/known_hosts"
    user_ssh_dir = os.path.expanduser(user_ssh_dir)

    if not os.path.exists(user_ssh_dir):
        if create_dir:
            try:
                os.makedirs(user_ssh_dir, 0700)
            except:
                module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
        else:
            module.fail_json(msg="%s does not exist" % user_ssh_dir)
    elif not os.path.isdir(user_ssh_dir):
        module.fail_json(msg="%s is not a directory" % user_ssh_dir)

    this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)

    rc, out, err = module.run_command(this_cmd)
    module.append_to_file(user_host_file, out)

    return rc, out, err
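
To make get_fqdn()'s branches concrete, here is how it treats the three URL shapes it distinguishes (the URLs are examples; the results follow from the logic above):

get_fqdn('git@github.com:ansible/ansible.git')       # -> 'github.com'
get_fqdn('ssh://git@git.example.com/repo.git')       # -> 'git.example.com'
get_fqdn('https://github.com/ansible/ansible.git')   # -> None (not an ssh/git scheme)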

v2/ansible/module_utils/openstack.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os


def openstack_argument_spec():
    # Consume standard OpenStack environment variables.
    # This is mainly only useful for ad-hoc command line operation as
    # in playbooks one would assume variables would be used appropriately
    OS_AUTH_URL=os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
    OS_PASSWORD=os.environ.get('OS_PASSWORD', None)
    OS_REGION_NAME=os.environ.get('OS_REGION_NAME', None)
    OS_USERNAME=os.environ.get('OS_USERNAME', 'admin')
    OS_TENANT_NAME=os.environ.get('OS_TENANT_NAME', OS_USERNAME)

    spec = dict(
        login_username = dict(default=OS_USERNAME),
        auth_url = dict(default=OS_AUTH_URL),
        region_name = dict(default=OS_REGION_NAME),
        availability_zone = dict(default=None),
    )
    if OS_PASSWORD:
        spec['login_password'] = dict(default=OS_PASSWORD)
    else:
        spec['login_password'] = dict(required=True)
    if OS_TENANT_NAME:
        spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
    else:
        spec['login_tenant_name'] = dict(required=True)
    return spec

def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):

    ret = []
    for (k, v) in addresses.iteritems():
        if key_name and k == key_name:
            ret.extend([addrs['addr'] for addrs in v])
        else:
            for interface_spec in v:
                if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
                    ret.append(interface_spec['addr'])
    return ret
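
A brief sketch of how these two helpers are meant to be combined with a module's own options, plus an example Nova "addresses" lookup (the extra argument names and the sample data are illustrative assumptions):

# Hypothetical module merging the shared spec with its own options:
argument_spec = openstack_argument_spec()
argument_spec.update(dict(name=dict(required=True)))
module = AnsibleModule(argument_spec=argument_spec)

# Picking the fixed address out of a Nova "addresses" structure:
addresses = {'private': [{'addr': '10.0.0.3', 'OS-EXT-IPS:type': 'fixed'}]}
openstack_find_nova_addresses(addresses, 'fixed')    # -> ['10.0.0.3']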

v2/ansible/module_utils/powershell.ps1 (new file, 144 lines)
@@ -0,0 +1,144 @@
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2014, and others
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

# Helper function to parse Ansible JSON arguments from a file passed as
# the single argument to the module
# Example: $params = Parse-Args $args
Function Parse-Args($arguments)
{
    $parameters = New-Object psobject;
    If ($arguments.Length -gt 0)
    {
        $parameters = Get-Content $arguments[0] | ConvertFrom-Json;
    }
    $parameters;
}

# Helper function to set an "attribute" on a psobject instance in powershell.
# This is a convenience to make adding Members to the object easier and
# slightly more pythonic
# Example: Set-Attr $result "changed" $true
Function Set-Attr($obj, $name, $value)
{
    # If the provided $obj is undefined, define one to be nice
    If (-not $obj.GetType)
    {
        $obj = New-Object psobject
    }

    $obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
}

# Helper function to convert a powershell object to JSON to echo it, exiting
# the script
# Example: Exit-Json $result
Function Exit-Json($obj)
{
    # If the provided $obj is undefined, define one to be nice
    If (-not $obj.GetType)
    {
        $obj = New-Object psobject
    }

    echo $obj | ConvertTo-Json -Depth 99
    Exit
}

# Helper function to add the "msg" property and "failed" property, convert the
# powershell object to JSON and echo it, exiting the script
# Example: Fail-Json $result "This is the failure message"
Function Fail-Json($obj, $message = $null)
{
    # If we weren't given 2 args, and the only arg was a string, create a new
    # psobject and use the arg as the failure message
    If ($message -eq $null -and $obj.GetType().Name -eq "String")
    {
        $message = $obj
        $obj = New-Object psobject
    }
    # If the first args is undefined or not an object, make it an object
    ElseIf (-not $obj.GetType -or $obj.GetType().Name -ne "PSCustomObject")
    {
        $obj = New-Object psobject
    }

    Set-Attr $obj "msg" $message
    Set-Attr $obj "failed" $true
    echo $obj | ConvertTo-Json -Depth 99
    Exit 1
}

# Helper function to get an "attribute" from a psobject instance in powershell.
# This is a convenience to make getting Members from an object easier and
# slightly more pythonic
# Example: $attr = Get-Attr $response "code" -default "1"
# Note that if you use the failifempty option, you do need to specify resultobject as well.
Function Get-Attr($obj, $name, $default = $null, $resultobj, $failifempty = $false, $emptyattributefailmessage)
{
    # Check if the provided Member $name exists in $obj and return it or the
    # default
    If ($obj.$name.GetType)
    {
        $obj.$name
    }
    ElseIf ($failifempty -eq $false)
    {
        $default
    }
    Else
    {
        If (!$emptyattributefailmessage) { $emptyattributefailmessage = "Missing required argument: $name" }
        Fail-Json -obj $resultobj -message $emptyattributefailmessage
    }
    return
}

# Helper filter/pipeline function to convert a value to boolean following current
# Ansible practices
# Example: $is_true = "true" | ConvertTo-Bool
Function ConvertTo-Bool
{
    param(
        [parameter(valuefrompipeline=$true)]
        $obj
    )

    $boolean_strings = "yes", "on", "1", "true", 1
    $obj_string = [string]$obj

    If (($obj.GetType().Name -eq "Boolean" -and $obj) -or $boolean_strings -contains $obj_string.ToLower())
    {
        $true
    }
    Else
    {
        $false
    }
    return
}

v2/ansible/module_utils/rax.py (new file, 277 lines)
@@ -0,0 +1,277 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from uuid import UUID


FINAL_STATUSES = ('ACTIVE', 'ERROR')
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
                 'error', 'error_deleting')

CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
                  'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
                 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
                 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']

NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"


def rax_slugify(value):
    """Prepend a key with rax_ and normalize the key name"""
    return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))


def rax_clb_node_to_dict(obj):
    """Function to convert a CLB Node object to a dict"""
    if not obj:
        return {}
    node = obj.to_dict()
    node['id'] = obj.id
    node['weight'] = obj.weight
    return node


def rax_to_dict(obj, obj_type='standard'):
    """Generic function to convert a pyrax object to a dict

    obj_type values:
        standard
        clb
        server

    """
    instance = {}
    for key in dir(obj):
        value = getattr(obj, key)
        if obj_type == 'clb' and key == 'nodes':
            instance[key] = []
            for node in value:
                instance[key].append(rax_clb_node_to_dict(node))
        elif (isinstance(value, list) and len(value) > 0 and
                not isinstance(value[0], NON_CALLABLES)):
            instance[key] = []
            for item in value:
                instance[key].append(rax_to_dict(item))
        elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
            if obj_type == 'server':
                key = rax_slugify(key)
            instance[key] = value

    if obj_type == 'server':
        for attr in ['id', 'accessIPv4', 'name', 'status']:
            instance[attr] = instance.get(rax_slugify(attr))

    return instance


def rax_find_image(module, rax_module, image):
    cs = rax_module.cloudservers
    try:
        UUID(image)
    except ValueError:
        try:
            image = cs.images.find(human_id=image)
        except(cs.exceptions.NotFound,
               cs.exceptions.NoUniqueMatch):
            try:
                image = cs.images.find(name=image)
            except (cs.exceptions.NotFound,
                    cs.exceptions.NoUniqueMatch):
                module.fail_json(msg='No matching image found (%s)' %
                                     image)

    return rax_module.utils.get_id(image)


def rax_find_volume(module, rax_module, name):
    cbs = rax_module.cloud_blockstorage
    try:
        UUID(name)
        volume = cbs.get(name)
    except ValueError:
        try:
            volume = cbs.find(name=name)
        except rax_module.exc.NotFound:
            volume = None
        except Exception, e:
            module.fail_json(msg='%s' % e)
    return volume


def rax_find_network(module, rax_module, network):
    cnw = rax_module.cloud_networks
    try:
        UUID(network)
    except ValueError:
        if network.lower() == 'public':
            return cnw.get_server_networks(PUBLIC_NET_ID)
        elif network.lower() == 'private':
            return cnw.get_server_networks(SERVICE_NET_ID)
        else:
            try:
                network_obj = cnw.find_network_by_label(network)
            except (rax_module.exceptions.NetworkNotFound,
                    rax_module.exceptions.NetworkLabelNotUnique):
                module.fail_json(msg='No matching network found (%s)' %
                                     network)
            else:
                return cnw.get_server_networks(network_obj)
    else:
        return cnw.get_server_networks(network)


def rax_find_server(module, rax_module, server):
    cs = rax_module.cloudservers
    try:
        UUID(server)
        server = cs.servers.get(server)
    except ValueError:
        servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
        if not servers:
            module.fail_json(msg='No Server was matched by name, '
                                 'try using the Server ID instead')
        if len(servers) > 1:
            module.fail_json(msg='Multiple servers matched by name, '
                                 'try using the Server ID instead')

        # We made it this far, grab the first and hopefully only server
        # in the list
        server = servers[0]
    return server


def rax_find_loadbalancer(module, rax_module, loadbalancer):
    clb = rax_module.cloud_loadbalancers
    try:
        found = clb.get(loadbalancer)
    except:
        found = []
        for lb in clb.list():
            if loadbalancer == lb.name:
                found.append(lb)

        if not found:
            module.fail_json(msg='No loadbalancer was matched')

        if len(found) > 1:
            module.fail_json(msg='Multiple loadbalancers matched')

        # We made it this far, grab the first and hopefully only item
        # in the list
        found = found[0]

    return found


def rax_argument_spec():
    return dict(
        api_key=dict(type='str', aliases=['password'], no_log=True),
        auth_endpoint=dict(type='str'),
        credentials=dict(type='str', aliases=['creds_file']),
        env=dict(type='str'),
        identity_type=dict(type='str', default='rackspace'),
        region=dict(type='str'),
        tenant_id=dict(type='str'),
        tenant_name=dict(type='str'),
        username=dict(type='str'),
        verify_ssl=dict(choices=BOOLEANS, type='bool'),
    )


def rax_required_together():
    return [['api_key', 'username']]


def setup_rax_module(module, rax_module, region_required=True):
    rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
                                               rax_module.USER_AGENT)

    api_key = module.params.get('api_key')
    auth_endpoint = module.params.get('auth_endpoint')
    credentials = module.params.get('credentials')
    env = module.params.get('env')
    identity_type = module.params.get('identity_type')
    region = module.params.get('region')
    tenant_id = module.params.get('tenant_id')
    tenant_name = module.params.get('tenant_name')
    username = module.params.get('username')
    verify_ssl = module.params.get('verify_ssl')

    if env is not None:
        rax_module.set_environment(env)

    rax_module.set_setting('identity_type', identity_type)
    if verify_ssl is not None:
        rax_module.set_setting('verify_ssl', verify_ssl)
    if auth_endpoint is not None:
        rax_module.set_setting('auth_endpoint', auth_endpoint)
    if tenant_id is not None:
        rax_module.set_setting('tenant_id', tenant_id)
    if tenant_name is not None:
        rax_module.set_setting('tenant_name', tenant_name)

    try:
        username = username or os.environ.get('RAX_USERNAME')
        if not username:
            username = rax_module.get_setting('keyring_username')
            if username:
                api_key = 'USE_KEYRING'
        if not api_key:
            api_key = os.environ.get('RAX_API_KEY')
        credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
                       os.environ.get('RAX_CREDS_FILE'))
        region = (region or os.environ.get('RAX_REGION') or
                  rax_module.get_setting('region'))
    except KeyError, e:
        module.fail_json(msg='Unable to load %s' % e.message)

    try:
        if api_key and username:
            if api_key == 'USE_KEYRING':
                rax_module.keyring_auth(username, region=region)
            else:
                rax_module.set_credentials(username, api_key=api_key,
                                           region=region)
        elif credentials:
            credentials = os.path.expanduser(credentials)
            rax_module.set_credential_file(credentials, region=region)
        else:
            raise Exception('No credentials supplied!')
    except Exception, e:
        module.fail_json(msg='%s' % e.message)

    if region_required and region not in rax_module.regions:
        module.fail_json(msg='%s is not a valid region, must be one of: %s' %
                             (region, ','.join(rax_module.regions)))

    return rax_module
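
A rough sketch of how a Rackspace module wires these pieces together. The pyrax import and the extra 'server' parameter are illustrative assumptions; this snippet expects the consuming module to provide them:

# Hypothetical module body.
import pyrax

argument_spec = rax_argument_spec()
argument_spec.update(dict(server=dict(required=True)))
module = AnsibleModule(argument_spec=argument_spec,
                       required_together=rax_required_together())
setup_rax_module(module, pyrax)   # credentials from params, env, or keyring
server = rax_find_server(module, pyrax, module.params['server'])
module.exit_json(changed=False, server=rax_to_dict(server, obj_type='server'))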

v2/ansible/module_utils/redhat.py (new file, 280 lines)
@@ -0,0 +1,280 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), James Laska
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
import re
import types
import ConfigParser
import shlex


class RegistrationBase(object):
    def __init__(self, module, username=None, password=None):
        self.module = module
        self.username = username
        self.password = password

    def configure(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def enable(self):
        # Remove any existing redhat.repo
        redhat_repo = '/etc/yum.repos.d/redhat.repo'
        if os.path.isfile(redhat_repo):
            os.unlink(redhat_repo)

    def register(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unregister(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unsubscribe(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def update_plugin_conf(self, plugin, enabled=True):
        plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
        if os.path.isfile(plugin_conf):
            cfg = ConfigParser.ConfigParser()
            cfg.read([plugin_conf])
            if enabled:
                cfg.set('main', 'enabled', 1)
            else:
                cfg.set('main', 'enabled', 0)
            fd = open(plugin_conf, 'rwa+')
            cfg.write(fd)
            fd.close()

    def subscribe(self, **kwargs):
        raise NotImplementedError("Must be implemented by a sub-class")


class Rhsm(RegistrationBase):
    def __init__(self, module, username=None, password=None):
        RegistrationBase.__init__(self, module, username, password)
        self.config = self._read_config()
        self.module = module

    def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
        '''
            Load RHSM configuration from /etc/rhsm/rhsm.conf.
            Returns:
             * ConfigParser object
        '''

        # Read RHSM defaults ...
        cp = ConfigParser.ConfigParser()
        cp.read(rhsm_conf)

        # Add support for specifying a default value w/o having to standup some configuration
        # Yeah, I know this should be subclassed ... but, oh well
        def get_option_default(self, key, default=''):
            sect, opt = key.split('.', 1)
            if self.has_section(sect) and self.has_option(sect, opt):
                return self.get(sect, opt)
            else:
                return default

        cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)

        return cp

    def enable(self):
        '''
            Enable the system to receive updates from subscription-manager.
            This involves updating affected yum plugins and removing any
            conflicting yum repositories.
        '''
        RegistrationBase.enable(self)
        self.update_plugin_conf('rhnplugin', False)
        self.update_plugin_conf('subscription-manager', True)

    def configure(self, **kwargs):
        '''
            Configure the system as directed for registration with RHN
            Raises:
              * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'config']

        # Pass supplied **kwargs as parameters to subscription-manager. Ignore
        # non-configuration parameters and replace '_' with '.'. For example,
        # 'server_hostname' becomes '--system.hostname'.
        for k,v in kwargs.items():
            if re.search(r'^(system|rhsm)_', k):
                args.append('--%s=%s' % (k.replace('_','.'), v))

        self.module.run_command(args, check_rc=True)

    @property
    def is_registered(self):
        '''
            Determine whether the current system is registered.
            Returns:
              * Boolean - whether the current system is currently registered to
                          RHN.
        '''
        # Quick version...
        if False:
            return os.path.isfile('/etc/pki/consumer/cert.pem') and \
                   os.path.isfile('/etc/pki/consumer/key.pem')

        args = ['subscription-manager', 'identity']
        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
        if rc == 0:
            return True
        else:
            return False

    def register(self, username, password, autosubscribe, activationkey):
        '''
            Register the current system to the provided RHN server
            Raises:
              * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'register']

        # Generate command arguments
        if activationkey:
            args.append('--activationkey "%s"' % activationkey)
        else:
            if autosubscribe:
                args.append('--autosubscribe')
            if username:
                args.extend(['--username', username])
            if password:
                args.extend(['--password', password])

        # Do the needful...
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)

    def unsubscribe(self):
        '''
            Unsubscribe a system from all subscribed channels
            Raises:
              * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'unsubscribe', '--all']
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)

    def unregister(self):
        '''
            Unregister a currently registered system
            Raises:
              * Exception - if error occurs while running command
        '''
        args = ['subscription-manager', 'unregister']
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)

    def subscribe(self, regexp):
        '''
            Subscribe current system to available pools matching the specified
            regular expression
            Raises:
              * Exception - if error occurs while running command
        '''

        # Available pools ready for subscription
        available_pools = RhsmPools(self.module)

        for pool in available_pools.filter(regexp):
            pool.subscribe()


class RhsmPool(object):
    '''
        Convenience class for housing subscription information
    '''

    def __init__(self, module, **kwargs):
        self.module = module
        for k,v in kwargs.items():
            setattr(self, k, v)

    def __str__(self):
        return str(self.__getattribute__('_name'))

    def subscribe(self):
        args = "subscription-manager subscribe --pool %s" % self.PoolId
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
        if rc == 0:
            return True
        else:
            return False


class RhsmPools(object):
    """
        This class is used for manipulating pools subscriptions with RHSM
    """
    def __init__(self, module):
        self.module = module
        self.products = self._load_product_list()

    def __iter__(self):
        return self.products.__iter__()

    def _load_product_list(self):
        """
            Loads list of all available pools for system in data structure
        """
        args = "subscription-manager list --available"
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)

        products = []
        for line in stdout.split('\n'):
            # Remove leading+trailing whitespace
            line = line.strip()
            # An empty line implies the end of an output group
            if len(line) == 0:
                continue
            # If a colon ':' is found, parse
            elif ':' in line:
                (key, value) = line.split(':', 1)
                key = key.strip().replace(" ", "")  # To unify
                value = value.strip()
                if key in ['ProductName', 'SubscriptionName']:
                    # Remember the name for later processing
                    products.append(RhsmPool(self.module, _name=value, key=value))
                elif products:
                    # Associate value with most recently recorded product
                    products[-1].__setattr__(key, value)
                # FIXME - log some warning?
                #else:
                #    warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
        return products

    def filter(self, regexp='^$'):
        '''
            Return a list of RhsmPools whose name matches the provided regular expression
        '''
        r = re.compile(regexp)
        for product in self.products:
            if r.search(product._name):
                yield product
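
A sketch of driving the Rhsm wrapper from a module. The parameter names are illustrative and 'module' stands for the embedding AnsibleModule instance, which this snippet assumes:

rhsm = Rhsm(module, username=module.params['username'],
            password=module.params['password'])
rhsm.enable()    # disable rhnplugin, enable the subscription-manager plugin
if not rhsm.is_registered:
    rhsm.register(module.params['username'], module.params['password'],
                  autosubscribe=True, activationkey=None)
    rhsm.subscribe(regexp='^Red Hat')   # attach all pools matching the regexp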

v2/ansible/module_utils/splitter.py (new file, 201 lines)
@@ -0,0 +1,201 @@
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

def _get_quote_state(token, quote_char):
    '''
    the goal of this block is to determine if the quoted string
    is unterminated in which case it needs to be put back together
    '''
    # the char before the current one, used to see if
    # the current character is escaped
    prev_char = None
    for idx, cur_char in enumerate(token):
        if idx > 0:
            prev_char = token[idx-1]
        if cur_char in '"\'' and prev_char != '\\':
            if quote_char:
                if cur_char == quote_char:
                    quote_char = None
            else:
                quote_char = cur_char
    return quote_char

def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
    '''
    this function counts the number of opening/closing blocks for a
    given opening/closing type and adjusts the current depth for that
    block based on the difference
    '''
    num_open = token.count(open_token)
    num_close = token.count(close_token)
    if num_open != num_close:
        cur_depth += (num_open - num_close)
        if cur_depth < 0:
            cur_depth = 0
    return cur_depth

def split_args(args):
    '''
    Splits args on whitespace, but intelligently reassembles
    those that may have been split over a jinja2 block or quotes.

    When used in a remote module, we won't ever have to be concerned about
    jinja2 blocks, however this function is/will be used in the
    core portions as well before the args are templated.

    example input: a=b c="foo bar"
    example output: ['a=b', 'c="foo bar"']

    Basically this is a variation of shlex that has some more intelligence for
    how Ansible needs to use it.
    '''

    # the list of params parsed out of the arg string
    # this is going to be the result value when we are done
    params = []

    # here we encode the args, so we have a uniform charset to
    # work with, and split on white space
    args = args.strip()
    try:
        args = args.encode('utf-8')
        do_decode = True
    except UnicodeDecodeError:
        do_decode = False
    items = args.split('\n')

    # iterate over the tokens, and reassemble any that may have been
    # split on a space inside a jinja2 block.
    # ex if tokens are "{{", "foo", "}}" these go together

    # These variables are used
    # to keep track of the state of the parsing, since blocks and quotes
    # may be nested within each other.

    quote_char = None
    inside_quotes = False
    print_depth = 0  # used to count nested jinja2 {{ }} blocks
    block_depth = 0  # used to count nested jinja2 {% %} blocks
    comment_depth = 0  # used to count nested jinja2 {# #} blocks

    # now we loop over each split chunk, coalescing tokens if the white space
    # split occurred within quotes or a jinja2 block of some kind
    for itemidx,item in enumerate(items):

        # we split on spaces and newlines separately, so that we
        # can tell which character we split on for reassembly
        # inside quotation characters
        tokens = item.strip().split(' ')

        line_continuation = False
        for idx,token in enumerate(tokens):

            # if we hit a line continuation character, but
            # we're not inside quotes, ignore it and continue
            # on to the next token while setting a flag
            if token == '\\' and not inside_quotes:
                line_continuation = True
                continue

            # store the previous quoting state for checking later
            was_inside_quotes = inside_quotes
            quote_char = _get_quote_state(token, quote_char)
            inside_quotes = quote_char is not None

            # multiple conditions may append a token to the list of params,
            # so we keep track with this flag to make sure it only happens once
            # append means add to the end of the list, don't append means concatenate
            # it to the end of the last token
            appended = False

            # if we're inside quotes now, but weren't before, append the token
            # to the end of the list, since we'll tack on more to it later
            # otherwise, if we're inside any jinja2 block, inside quotes, or we were
            # inside quotes (but aren't now) concat this token to the last param
            if inside_quotes and not was_inside_quotes:
                params.append(token)
                appended = True
            elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
                if idx == 0 and not inside_quotes and was_inside_quotes:
                    params[-1] = "%s%s" % (params[-1], token)
                elif len(tokens) > 1:
                    spacer = ''
                    if idx > 0:
                        spacer = ' '
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                else:
                    spacer = ''
                    if not params[-1].endswith('\n') and idx == 0:
                        spacer = '\n'
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                appended = True

            # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
            # and may append the current token to the params (if we haven't previously done so)
            prev_print_depth = print_depth
            print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
            if print_depth != prev_print_depth and not appended:
                params.append(token)
                appended = True

            prev_block_depth = block_depth
            block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
            if block_depth != prev_block_depth and not appended:
                params.append(token)
                appended = True

            prev_comment_depth = comment_depth
            comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
            if comment_depth != prev_comment_depth and not appended:
                params.append(token)
                appended = True

            # finally, if we're at zero depth for all blocks and not inside quotes, and have not
            # yet appended anything to the list of params, we do so now
            if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
                params.append(token)

        # if this was the last token in the list, and we have more than
        # one item (meaning we split on newlines), add a newline back here
        # to preserve the original structure
        if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
            if not params[-1].endswith('\n') or item == '':
                params[-1] += '\n'

        # always clear the line continuation flag
        line_continuation = False

    # If we're done and things are not at zero depth or we're still inside quotes,
    # raise an error to indicate that the args were unbalanced
    if print_depth or block_depth or comment_depth or inside_quotes:
        raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")

    # finally, we decode each param back to the unicode it was in the arg string
    if do_decode:
        params = [x.decode('utf-8') for x in params]

    return params

def is_quoted(data):
    return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")

def unquote(data):
    ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
    if is_quoted(data):
        return data[1:-1]
    return data
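
Worked examples of the splitter's behavior, derived from the rules above (the first comes straight from the docstring, the rest follow from tracing the quote/depth state machine):

split_args('a=b c="foo bar"')          # -> ['a=b', 'c="foo bar"']
split_args('msg={{ item | upper }}')   # -> ['msg={{ item | upper }}']
split_args('key="unterminated')        # raises Exception (unbalanced quotes)
unquote('"foo bar"')                   # -> 'foo bar'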

v2/ansible/module_utils/urls.py (new file, 456 lines)
@@ -0,0 +1,456 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

try:
    import urllib
    HAS_URLLIB = True
except:
    HAS_URLLIB = False

try:
    import urllib2
    HAS_URLLIB2 = True
except:
    HAS_URLLIB2 = False

try:
    import urlparse
    HAS_URLPARSE = True
except:
    HAS_URLPARSE = False

try:
    import ssl
    HAS_SSL = True
except:
    HAS_SSL = False
import httplib
import os
import re
import socket
import sys  # sys was missing in the original; fetch_url's version check below needs it
import tempfile


# This is a dummy cacert provided for Mac OS since you need at least 1
# ca cert, regardless of validity, for Python on Mac OS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
DUMMY_CA_CERT = """-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
class CustomHTTPSConnection(httplib.HTTPSConnection):
    def connect(self):
        "Connect to a host on a given (SSL) port."

        if hasattr(self, 'source_address'):
            sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
        else:
            sock = socket.create_connection((self.host, self.port), self.timeout)
        if self._tunnel_host:
            self.sock = sock
            self._tunnel()
        self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)


class CustomHTTPSHandler(urllib2.HTTPSHandler):

    def https_open(self, req):
        return self.do_open(CustomHTTPSConnection, req)

    https_request = urllib2.AbstractHTTPHandler.do_request_

def generic_urlparse(parts):
    '''
    Returns a dictionary of url parts as parsed by urlparse,
    but accounts for the fact that older versions of that
    library do not support named attributes (ie. .netloc)
    '''
    generic_parts = dict()
    if hasattr(parts, 'netloc'):
        # urlparse is newer, just read the fields straight
        # from the parts object
        generic_parts['scheme'] = parts.scheme
        generic_parts['netloc'] = parts.netloc
        generic_parts['path'] = parts.path
        generic_parts['params'] = parts.params
        generic_parts['query'] = parts.query
        generic_parts['fragment'] = parts.fragment
        generic_parts['username'] = parts.username
        generic_parts['password'] = parts.password
        generic_parts['hostname'] = parts.hostname
        generic_parts['port'] = parts.port
    else:
        # we have to use indexes, and then parse out
        # the other parts not supported by indexing
        generic_parts['scheme'] = parts[0]
        generic_parts['netloc'] = parts[1]
        generic_parts['path'] = parts[2]
        generic_parts['params'] = parts[3]
        generic_parts['query'] = parts[4]
        generic_parts['fragment'] = parts[5]
        # get the username, password, etc.
        try:
            netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
            # match() returns a match object, so .groups() is required here;
            # unpacking the match object directly always raised and fell
            # through to the except clause below
            (auth, hostname, port) = netloc_re.match(parts[1]).groups()
            if port:
                # the capture group for the port will include the ':',
                # so remove it and convert the port to an integer
                port = int(port[1:])
            # initialize these so hostname/port survive when no credentials
            # are present in the netloc
            username = None
            password = None
            if auth:
                # the capture group above includes the @, so remove it
                # and then split it up based on the first ':' found
                auth = auth[:-1]
                username, password = auth.split(':', 1)
            generic_parts['username'] = username
            generic_parts['password'] = password
            generic_parts['hostname'] = hostname
            generic_parts['port'] = port
        except:
            generic_parts['username'] = None
            generic_parts['password'] = None
            generic_parts['hostname'] = None
            generic_parts['port'] = None
    return generic_parts

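A quick sketch of what the helper yields (the URL and values are illustrative):

    import urlparse  # Python 2 stdlib

    parts = generic_urlparse(urlparse.urlparse('https://user:secret@example.com:8443/api?x=1'))
    print(parts['hostname'])  # example.com
    print(parts['port'])      # 8443
    print(parts['username'])  # user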
class RequestWithMethod(urllib2.Request):
    '''
    Workaround for using DELETE/PUT/etc with urllib2
    Originally contained in library/net_infrastructure/dnsmadeeasy
    '''

    def __init__(self, url, method, data=None, headers={}):
        self._method = method
        urllib2.Request.__init__(self, url, data, headers)

    def get_method(self):
        if self._method:
            return self._method
        else:
            return urllib2.Request.get_method(self)

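For example (a sketch; the URL is hypothetical):

    req = RequestWithMethod('https://api.example.com/records/1', 'DELETE')
    # a plain urllib2.Request would report GET here, since no body was given
    print(req.get_method())  # DELETE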
class SSLValidationHandler(urllib2.BaseHandler):
    '''
    A custom handler class for SSL validation.

    Based on:
    http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
    http://techknack.net/python-urllib2-handlers/
    '''
    CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"

    def __init__(self, module, hostname, port):
        self.module = module
        self.hostname = hostname
        self.port = port

    def get_ca_certs(self):
        # tries to find a valid CA cert in one of the
        # standard locations for the current distribution

        ca_certs = []
        paths_checked = []
        # get_platform() and get_distribution() are provided by
        # module_utils/basic.py, which is combined with this snippet
        # when a module is built
        platform = get_platform()
        distribution = get_distribution()

        # build a list of paths to check for .crt/.pem files
        # based on the platform type
        paths_checked.append('/etc/ssl/certs')
        if platform == 'Linux':
            paths_checked.append('/etc/pki/ca-trust/extracted/pem')
            paths_checked.append('/etc/pki/tls/certs')
            paths_checked.append('/usr/share/ca-certificates/cacert.org')
        elif platform == 'FreeBSD':
            paths_checked.append('/usr/local/share/certs')
        elif platform == 'OpenBSD':
            paths_checked.append('/etc/ssl')
        elif platform == 'NetBSD':
            # the original appended this to the unused ca_certs list,
            # so the NetBSD path was never actually checked
            paths_checked.append('/etc/openssl/certs')
        elif platform == 'SunOS':
            paths_checked.append('/opt/local/etc/openssl/certs')

        # fall back to a user-deployed cert in a standard
        # location if the OS platform one is not available
        paths_checked.append('/etc/ansible')

        tmp_fd, tmp_path = tempfile.mkstemp()

        # Write the dummy ca cert if we are running on Mac OS X
        if platform == 'Darwin':
            os.write(tmp_fd, DUMMY_CA_CERT)
            # Default Homebrew path for OpenSSL certs
            paths_checked.append('/usr/local/etc/openssl')

        # for all of the paths, find any .crt or .pem files
        # and compile them into single temp file for use
        # in the ssl check to speed up the test
        for path in paths_checked:
            if os.path.exists(path) and os.path.isdir(path):
                dir_contents = os.listdir(path)
                for f in dir_contents:
                    full_path = os.path.join(path, f)
                    if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
                        try:
                            cert_file = open(full_path, 'r')
                            os.write(tmp_fd, cert_file.read())
                            os.write(tmp_fd, '\n')
                            cert_file.close()
                        except:
                            pass

        return (tmp_path, paths_checked)

    def validate_proxy_response(self, response, valid_codes=[200]):
        '''
        make sure we get back a valid code from the proxy
        '''
        try:
            (http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
            if int(resp_code) not in valid_codes:
                raise Exception
        except:
            self.module.fail_json(msg='Connection to proxy failed')

    def http_request(self, req):
        tmp_ca_cert_path, paths_checked = self.get_ca_certs()
        https_proxy = os.environ.get('https_proxy')
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if https_proxy:
                proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy))
                s.connect((proxy_parts.get('hostname'), proxy_parts.get('port')))
                if proxy_parts.get('scheme') == 'http':
                    s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
                    if proxy_parts.get('username'):
                        credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
                        s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip())
                    s.sendall('\r\n')
                    connect_result = s.recv(4096)
                    self.validate_proxy_response(connect_result)
                    ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
                else:
                    self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
            else:
                s.connect((self.hostname, self.port))
                ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
            # close the ssl connection
            #ssl_s.unwrap()
            s.close()
        except (ssl.SSLError, socket.error), e:
            # fail if we tried all of the certs but none worked
            if 'connection refused' in str(e).lower():
                self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port))
            else:
                self.module.fail_json(
                    msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
                    'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
                    'Paths checked for this platform: %s' % ", ".join(paths_checked)
                )
        try:
            # cleanup the temp file created, don't worry
            # if it fails for some reason
            os.remove(tmp_ca_cert_path)
        except:
            pass

        return req

    https_request = http_request

def url_argument_spec():
    '''
    Creates an argument spec that can be used with any module
    that will be requesting content via urllib/urllib2
    '''
    return dict(
        url = dict(),
        force = dict(default='no', aliases=['thirsty'], type='bool'),
        http_agent = dict(default='ansible-httpget'),
        use_proxy = dict(default='yes', type='bool'),
        validate_certs = dict(default='yes', type='bool'),
        url_username = dict(required=False),
        url_password = dict(required=False),
    )

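Modules typically merge this into their own spec before constructing the module object. A sketch (AnsibleModule comes from module_utils/basic.py; `dest` is a made-up option for illustration):

    argument_spec = url_argument_spec()
    argument_spec.update(
        dest=dict(required=True),
    )
    module = AnsibleModule(argument_spec=argument_spec)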
def fetch_url(module, url, data=None, headers=None, method=None,
              use_proxy=True, force=False, last_mod_time=None, timeout=10):
    '''
    Fetches a file from an HTTP/FTP server using urllib2
    '''

    if not HAS_URLLIB:
        module.fail_json(msg='urllib is not installed')
    if not HAS_URLLIB2:
        module.fail_json(msg='urllib2 is not installed')
    elif not HAS_URLPARSE:
        module.fail_json(msg='urlparse is not installed')

    r = None
    handlers = []
    info = dict(url=url)

    distribution = get_distribution()
    # Get validate_certs from the module params
    validate_certs = module.params.get('validate_certs', True)

    # FIXME: change the following to use the generic_urlparse function
    #        to remove the indexed references for 'parsed'
    parsed = urlparse.urlparse(url)
    if parsed[0] == 'https':
        if not HAS_SSL and validate_certs:
            if distribution == 'Redhat':
                module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL')
            else:
                module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')

        elif validate_certs:
            # do the cert validation
            netloc = parsed[1]
            if '@' in netloc:
                netloc = netloc.split('@', 1)[1]
            if ':' in netloc:
                hostname, port = netloc.split(':', 1)
            else:
                hostname = netloc
                port = 443
            # create the SSL validation handler and
            # add it to the list of handlers
            ssl_handler = SSLValidationHandler(module, hostname, port)
            handlers.append(ssl_handler)

    if parsed[0] != 'ftp':
        username = module.params.get('url_username', '')
        if username:
            password = module.params.get('url_password', '')
            netloc = parsed[1]
        elif '@' in parsed[1]:
            credentials, netloc = parsed[1].split('@', 1)
            if ':' in credentials:
                username, password = credentials.split(':', 1)
            else:
                username = credentials
                password = ''

            parsed = list(parsed)
            parsed[1] = netloc

            # reconstruct url without credentials
            url = urlparse.urlunparse(parsed)

        if username:
            passman = urllib2.HTTPPasswordMgrWithDefaultRealm()

            # this creates a password manager
            passman.add_password(None, netloc, username, password)

            # because we have put None at the start it will always
            # use this username/password combination for urls
            # for which `theurl` is a super-url
            authhandler = urllib2.HTTPBasicAuthHandler(passman)

            # create the AuthHandler
            handlers.append(authhandler)

    if not use_proxy:
        proxyhandler = urllib2.ProxyHandler({})
        handlers.append(proxyhandler)

    # pre-2.6 versions of python cannot use the custom https
    # handler, since the socket class is lacking this method
    if hasattr(socket, 'create_connection'):
        handlers.append(CustomHTTPSHandler)

    opener = urllib2.build_opener(*handlers)
    urllib2.install_opener(opener)

    if method:
        if method.upper() not in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT'):
            module.fail_json(msg='invalid HTTP request method; %s' % method.upper())
        request = RequestWithMethod(url, method.upper(), data)
    else:
        request = urllib2.Request(url, data)

    # add the custom agent header, to help prevent issues
    # with sites that block the default urllib agent string
    request.add_header('User-agent', module.params.get('http_agent'))

    # if we're ok with getting a 304, set the timestamp in the
    # header, otherwise make sure we don't get a cached copy
    if last_mod_time and not force:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)
    else:
        request.add_header('cache-control', 'no-cache')

    # user defined headers now, which may override things we've set above
    if headers:
        if not isinstance(headers, dict):
            module.fail_json(msg="headers provided to fetch_url() must be a dict")
        for header in headers:
            request.add_header(header, headers[header])

    try:
        if sys.version_info < (2, 6, 0):
            # urlopen in python prior to 2.6.0 did not
            # have a timeout parameter
            r = urllib2.urlopen(request, None)
        else:
            r = urllib2.urlopen(request, None, timeout)
        info.update(r.info())
        info['url'] = r.geturl()  # The URL goes in too, because of redirects.
        info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
    except urllib2.HTTPError, e:
        info.update(dict(msg=str(e), status=e.code))
    except urllib2.URLError, e:
        code = int(getattr(e, 'code', -1))
        info.update(dict(msg="Request failed: %s" % str(e), status=code))
    except socket.error, e:
        info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
    except Exception, e:
        info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))

    return r, info

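A sketch of typical module usage (the URL is hypothetical; on failure `r` is None and `info['status']` carries the code):

    r, info = fetch_url(module, 'https://example.com/data.json', timeout=5)
    if info['status'] != 200:
        module.fail_json(msg="request failed: %s" % info['msg'])
    content = r.read()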
@ -1 +1 @@
-Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0
+Subproject commit c16601fffac87c941eb15263f24552e91641963d

v2/ansible/new_inventory/__init__.py (new file, 341 lines)

@ -0,0 +1,341 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

#############################################

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

# re and sys were missing in the original, but are used by the
# pattern-matching and implicit-localhost code below
import re
import sys

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.inventory.aggregate import InventoryAggregateParser

class Inventory:
    '''
    Create hosts and groups from inventory

    Retrieve the hosts and groups that ansible knows about from this class.

    Retrieve raw variables (non-expanded) from the Group and Host classes
    returned from here.
    '''

    def __init__(self, inventory_list=C.DEFAULT_HOST_LIST):
        '''
        :kwarg inventory_list: A list of inventory sources. This may be file
            names which will be parsed as ini-like files, executable scripts
            which return inventory data as json, directories of both of the above,
            or hostnames. Files and directories are
        :kwarg vault_password: Password to use if any of the inventory sources
            are in an ansible vault
        '''

        self._restricted_to = None
        self._filter_pattern = None

        parser = InventoryAggregateParser(inventory_list)
        parser.parse()

        self._basedir = parser.basedir
        self._hosts = parser.hosts
        self._groups = parser.groups

    def get_hosts(self):
        '''
        Return the list of hosts, after filtering based on any set pattern
        and restricting the results based on the set host restrictions.
        '''

        if self._filter_pattern:
            hosts = self._filter_hosts()
        else:
            hosts = self._hosts[:]

        if self._restricted_to is not None:
            # this will preserve the order of hosts after intersecting them
            res_set = set(hosts).intersection(self._restricted_to)
            return [h for h in hosts if h in res_set]
        else:
            return hosts[:]

    def get_groups(self):
        '''
        Retrieve the Group objects known to the Inventory
        '''

        return self._groups[:]

    def get_host(self, hostname):
        '''
        Retrieve the Host object for a hostname
        '''

        for host in self._hosts:
            if host.name == hostname:
                return host

        return None

    def get_group(self, groupname):
        '''
        Retrieve the Group object for a groupname
        '''

        for group in self._groups:
            # the original compared against an undefined name, group_name
            if group.name == groupname:
                return group

        return None

    def add_group(self, group):
        '''
        Add a new group to the inventory
        '''

        if group not in self._groups:
            self._groups.append(group)

    def set_filter_pattern(self, pattern='all'):
        '''
        Sets a pattern upon which hosts/groups will be filtered.
        This pattern can contain logical groupings such as unions,
        intersections and negations using special syntax.
        '''

        self._filter_pattern = pattern

    def set_host_restriction(self, restriction):
        '''
        Restrict operations to hosts in the given list
        '''

        assert isinstance(restriction, list)
        self._restricted_to = restriction[:]

    def remove_host_restriction(self):
        '''
        Remove the restriction on hosts, if any.
        '''

        self._restricted_to = None

    def _filter_hosts(self):
        """
        Limits inventory results to a subset of inventory that matches a given
        list of patterns, such as to select a subset of a hosts selection that also
        belongs to a certain geographic group or numeric slice.

        Corresponds to --limit parameter to ansible-playbook

        :arg patterns: The pattern to limit with. If this is None it
            clears the subset. Multiple patterns may be specified as a comma,
            semicolon, or colon separated string.
        """

        hosts = []

        pattern_regular = []
        pattern_intersection = []
        pattern_exclude = []

        # the filter pattern is stored as self._filter_pattern (the original
        # read a nonexistent self._pattern attribute here)
        patterns = self._filter_pattern.replace(";", ":").split(":")
        for p in patterns:
            if p.startswith("!"):
                pattern_exclude.append(p)
            elif p.startswith("&"):
                pattern_intersection.append(p)
            elif p:
                pattern_regular.append(p)

        # if no regular pattern was given, hence only exclude and/or intersection
        # make that magically work
        if pattern_regular == []:
            pattern_regular = ['all']

        # when applying the host selectors, run those without the "&" or "!"
        # first, then the &s, then the !s.
        patterns = pattern_regular + pattern_intersection + pattern_exclude

        for p in patterns:
            intersect = False
            negate = False
            if p.startswith('&'):
                p = p[1:]
                intersect = True
            elif p.startswith('!'):
                p = p[1:]
                negate = True

            target = self._resolve_pattern(p)
            if isinstance(target, Host):
                if negate and target in hosts:
                    # remove it
                    hosts.remove(target)
                elif target not in hosts:
                    # for both union and intersections, we just append it
                    hosts.append(target)
            else:
                # the two comprehensions below were swapped in the original:
                # intersection keeps hosts that are in the target group,
                # negation drops them
                if intersect:
                    hosts = [h for h in hosts if h in target]
                elif negate:
                    hosts = [h for h in hosts if h not in target]
                else:
                    to_append = [h for h in target if h.name not in [y.name for y in hosts]]
                    hosts.extend(to_append)

        return hosts

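A sketch of the intended pattern syntax (group names hypothetical; this inventory rewrite is still work in progress, so treat it as the design target rather than working behavior):

    inv = Inventory()
    # union of webservers, intersected with east, excluding web03
    inv.set_filter_pattern('webservers:&east:!web03')
    hosts = inv.get_hosts()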
    def _resolve_pattern(self, pattern):
        target = self.get_host(pattern)
        if target:
            return target
        else:
            (name, enumeration_details) = self._enumeration_info(pattern)
            hpat = self._hosts_in_unenumerated_pattern(name)
            result = self._apply_ranges(pattern, hpat)
            return result

    def _enumeration_info(self, pattern):
        """
        returns (pattern, limits) taking a regular pattern and finding out
        which parts of it correspond to start/stop offsets.  limits is
        a tuple of (start, stop) or None
        """

        # Do not parse regexes for enumeration info
        if pattern.startswith('~'):
            return (pattern, None)

        # The regex used to match on the range, which can be [x] or [x-y].
        pattern_re = re.compile(r"^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
        m = pattern_re.match(pattern)
        if m:
            (target, first, last, rest) = m.groups()
            first = int(first)
            if last:
                if first < 0:
                    raise AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
                last = int(last)
            else:
                last = first
            return (target, (first, last))
        else:
            return (pattern, None)

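For reference, the mapping this is meant to produce (examples worked from the regex above, not from a test suite):

    # _enumeration_info('webservers[0-2]') -> ('webservers', (0, 2))
    # _enumeration_info('webservers[3]')   -> ('webservers', (3, 3))
    # _enumeration_info('~web\d+')         -> ('~web\d+', None)   # regex patterns are skipped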
    def _apply_ranges(self, pat, hosts):
        """
        given a pattern like foo, that matches hosts, return all of hosts
        given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
        """

        # If there are no hosts to select from, just return the
        # empty set. This prevents trying to do selections on an empty set.
        # issue#6258
        if not hosts:
            return hosts

        (loose_pattern, limits) = self._enumeration_info(pat)
        if not limits:
            return hosts

        (left, right) = limits

        if left == '':
            left = 0
        if right == '':
            right = 0
        left = int(left)
        right = int(right)
        try:
            if left != right:
                return hosts[left:right]
            else:
                return [hosts[left]]
        except IndexError:
            raise AnsibleError("no hosts matching the pattern '%s' were found" % pat)

    def _hosts_in_unenumerated_pattern(self, pattern):
        """ Get all host names matching the pattern """

        results = []
        hosts = []
        hostnames = set()

        # ignore any negative checks here, this is handled elsewhere
        pattern = pattern.replace("!", "").replace("&", "")

        def __append_host_to_results(host):
            if host not in results and host.name not in hostnames:
                hostnames.add(host.name)
                results.append(host)

        groups = self.get_groups()
        for group in groups:
            if pattern == 'all':
                for host in group.get_hosts():
                    __append_host_to_results(host)
            else:
                if self._match(group.name, pattern):
                    for host in group.get_hosts():
                        __append_host_to_results(host)
                else:
                    matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
                    for host in matching_hosts:
                        __append_host_to_results(host)

        if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
            new_host = self._create_implicit_localhost(pattern)
            results.append(new_host)
        return results

    def _create_implicit_localhost(self, pattern):
        new_host = Host(pattern)
        new_host._connection = 'local'
        new_host.set_variable("ansible_python_interpreter", sys.executable)
        ungrouped = self.get_group("ungrouped")
        if ungrouped is None:
            self.add_group(Group('ungrouped'))
            ungrouped = self.get_group('ungrouped')
            self.get_group('all').add_child_group(ungrouped)
        ungrouped.add_host(new_host)
        return new_host

    def is_file(self):
        '''
        Did inventory come from a file?

        :returns: True if the inventory is file based, False otherwise
        '''
        pass

    def src(self):
        '''
        What's the complete path to the inventory file?

        :returns: Complete path to the inventory file. None if inventory is
            not file-based
        '''
        pass

    def basedir(self):
        '''
        The directory from which the inventory was read.
        '''

        return self._basedir

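Typical lookup calls against the class above (hostname hypothetical; again a sketch over work-in-progress code):

    inv = Inventory()
    host = inv.get_host('web01.example.com')
    if host is not None:
        print(host.get_vars())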
v2/ansible/new_inventory/host.py (new file, 51 lines)

@ -0,0 +1,51 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

class Host:
    def __init__(self, name):
        self._name = name
        self._connection = None
        self._ipv4_address = ''
        self._ipv6_address = ''
        self._port = 22
        self._vars = dict()

    def __repr__(self):
        return self.get_name()

    def get_name(self):
        return self._name

    def get_groups(self):
        return []

    def set_variable(self, name, value):
        ''' sets a variable for this host '''

        self._vars[name] = value

    def get_vars(self):
        ''' returns all variables for this host '''

        all_vars = self._vars.copy()
        all_vars.update(dict(inventory_hostname=self._name))
        return all_vars

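A short sketch of the accessor behavior (hostname hypothetical):

    h = Host('web01.example.com')
    h.set_variable('ansible_ssh_port', 2222)
    print(h.get_vars())
    # {'ansible_ssh_port': 2222, 'inventory_hostname': 'web01.example.com'}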
@ -19,3 +19,203 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
+
+import json
+import os
+
+from yaml import load, YAMLError
+
+from ansible.errors import AnsibleParserError
+from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
+
+from ansible.parsing.vault import VaultLib
+from ansible.parsing.splitter import unquote
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+
+class DataLoader():
+
+    '''
+    The DataLoader class is used to load and parse YAML or JSON content,
+    either from a given file name or from a string that was previously
+    read in through other means. A Vault password can be specified, and
+    any vault-encrypted files will be decrypted.
+
+    Data read from files will also be cached, so the file will never be
+    read from disk more than once.
+
+    Usage:
+
+        dl = DataLoader()
+        (or)
+        dl = DataLoader(vault_password='foo')
+
+        ds = dl.load('...')
+        ds = dl.load_from_file('/path/to/file')
+    '''
+
+    def __init__(self, vault_password=None):
+        self._basedir = '.'
+        self._vault_password = vault_password
+        self._FILE_CACHE = dict()
+
+        self._vault = VaultLib(password=vault_password)
+
+    def load(self, data, file_name='<string>', show_content=True):
+        '''
+        Creates a python datastructure from the given data, which can be either
+        a JSON or YAML string.
+        '''
+
+        try:
+            # we first try to load this data as JSON
+            return json.loads(data)
+        except:
+            try:
+                # if loading JSON failed for any reason, we go ahead
+                # and try to parse it as YAML instead
+                return self._safe_load(data, file_name=file_name)
+            except YAMLError as yaml_exc:
+                self._handle_error(yaml_exc, file_name, show_content)
+
+    def load_from_file(self, file_name):
+        ''' Loads data from a file, which can contain either JSON or YAML. '''
+
+        file_name = self.path_dwim(file_name)
+
+        # if the file has already been read in and cached, we'll
+        # return those results to avoid more file/vault operations
+        if file_name in self._FILE_CACHE:
+            return self._FILE_CACHE[file_name]
+
+        # read the file contents and load the data structure from them
+        (file_data, show_content) = self._get_file_contents(file_name)
+        parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)
+
+        # cache the file contents for next time
+        self._FILE_CACHE[file_name] = parsed_data
+
+        return parsed_data
+
+    def path_exists(self, path):
+        return os.path.exists(path)
+
+    def is_directory(self, path):
+        return os.path.isdir(path)
+
+    def is_file(self, path):
+        return os.path.isfile(path)
+
+    def _safe_load(self, stream, file_name=None):
+        ''' Implements yaml.safe_load(), except using our custom loader class. '''
+
+        loader = AnsibleLoader(stream, file_name)
+        try:
+            return loader.get_single_data()
+        finally:
+            loader.dispose()
+
+    def _get_file_contents(self, file_name):
+        '''
+        Reads the file contents from the given file name, and will decrypt them
+        if they are found to be vault-encrypted.
+        '''
+
+        if not self.path_exists(file_name) or not self.is_file(file_name):
+            raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name)
+
+        show_content = True
+        try:
+            with open(file_name, 'r') as f:
+                data = f.read()
+                if self._vault.is_encrypted(data):
+                    data = self._vault.decrypt(data)
+                    show_content = False
+            return (data, show_content)
+        except (IOError, OSError) as e:
+            raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))
+
+    def _handle_error(self, yaml_exc, file_name, show_content):
+        '''
+        Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
+        file name/position where a YAML exception occurred, and raises an AnsibleParserError
+        to display the syntax exception information.
+        '''
+
+        # if the YAML exception contains a problem mark, use it to construct
+        # an object the error class can use to display the faulty line
+        err_obj = None
+        if hasattr(yaml_exc, 'problem_mark'):
+            err_obj = AnsibleBaseYAMLObject()
+            err_obj.set_position_info(file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
+
+        raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)
+
+    def get_basedir(self):
+        ''' returns the current basedir '''
+        return self._basedir
+
+    def set_basedir(self, basedir):
+        ''' sets the base directory, used to find files when a relative path is given '''
+
+        if basedir is not None:
+            self._basedir = basedir
+
+    def path_dwim(self, given):
+        '''
+        make relative paths work like folks expect.
+        '''
+
+        given = unquote(given)
+
+        if given.startswith("/"):
+            return os.path.abspath(given)
+        elif given.startswith("~"):
+            return os.path.abspath(os.path.expanduser(given))
+        else:
+            return os.path.abspath(os.path.join(self._basedir, given))
+
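Expected resolution behavior, sketched (paths hypothetical):

    dl = DataLoader()
    dl.set_basedir('/srv/playbooks')
    dl.path_dwim('group_vars/all.yml')   # /srv/playbooks/group_vars/all.yml
    dl.path_dwim('~/inventory')          # expands the home directory
    dl.path_dwim('/etc/ansible/hosts')   # absolute paths pass through unchanged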
+    def path_dwim_relative(self, role_path, dirname, source):
+        ''' find one file in a directory one level up in a dir named dirname relative to current '''
+
+        basedir = os.path.dirname(role_path)
+        if os.path.islink(basedir):
+            # FIXME:
+            #basedir = unfrackpath(basedir)
+            template2 = os.path.join(basedir, dirname, source)
+        else:
+            template2 = os.path.join(basedir, '..', dirname, source)
+
+        source1 = os.path.join(role_path, dirname, source)
+        if os.path.exists(source1):
+            return source1
+
+        cur_basedir = self._basedir
+        self.set_basedir(basedir)
+        source2 = self.path_dwim(template2)
+        if os.path.exists(source2):
+            self.set_basedir(cur_basedir)
+            return source2
+
+        obvious_local_path = self.path_dwim(source)
+        if os.path.exists(obvious_local_path):
+            self.set_basedir(cur_basedir)
+            return obvious_local_path
+
+        self.set_basedir(cur_basedir)
+        return source2 # which does not exist
+
+    #def __getstate__(self):
+    #    data = dict(
+    #        basedir = self._basedir,
+    #        vault_password = self._vault_password,
+    #        FILE_CACHE = self._FILE_CACHE,
+    #    )
+    #    return data
+
+    #def __setstate__(self, data):
+    #    self._basedir = data.get('basedir', '.')
+    #    self._FILE_CACHE = data.get('FILE_CACHE', dict())
+    #    self._vault_password = data.get('vault_password', '')
+    #
+    #    self._vault = VaultLib(password=self._vault_password)
@ -20,9 +20,10 @@ from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

 from six import iteritems, string_types
+from types import NoneType

 from ansible.errors import AnsibleParserError
-from ansible.plugins import module_finder
+from ansible.plugins import module_loader
 from ansible.parsing.splitter import parse_kv

 class ModuleArgsParser:

@ -120,7 +121,7 @@ class ModuleArgsParser:
         (action, args) = self._normalize_new_style_args(thing)

         # this can occasionally happen, simplify
-        if 'args' in args:
+        if args and 'args' in args:
             args = args['args']

         return (action, args)

@ -144,8 +145,11 @@ class ModuleArgsParser:
         elif isinstance(thing, string_types):
             # form is like: local_action: copy src=a dest=b ... pretty common
             args = parse_kv(thing)
+        elif isinstance(thing, NoneType):
+            # this can happen with modules which take no params, like ping:
+            args = None
         else:
-            raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
+            raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
         return args

     def _normalize_new_style_args(self, thing):

@ -180,7 +184,7 @@ class ModuleArgsParser:
         else:
             # need a dict or a string, so giving up
-            raise AnsibleParsingError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
+            raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)

         return (action, args)

@ -224,7 +228,7 @@ class ModuleArgsParser:
         # walk the input dictionary to see we recognize a module name
         for (item, value) in iteritems(self._task_ds):
-            if item in module_finder:
+            if item in module_loader:
                 # finding more than one module name is a problem
                 if action is not None:
                     raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
@ -1,4 +1,4 @@
-# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
 #
 # This file is part of Ansible
 #

@ -15,27 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.

-from ansible import utils, errors
-from ansible.utils import template
-import os
-
-class LookupModule(object):
-
-    def __init__(self, basedir=None, **kwargs):
-        self.basedir = basedir
-
-    def run(self, terms, inject=None, **kwargs):
-
-        try:
-            terms = template.template(self.basedir, terms, inject)
-        except Exception, e:
-            pass
-
-        if isinstance(terms, basestring):
-            terms = [ terms ]
-
-        ret = []
-        for term in terms:
-            var = term.split()[0]
-            ret.append(os.getenv(var, ''))
-        return ret
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
v2/ansible/parsing/utils/jsonify.py (new file, 26 lines)

@ -0,0 +1,26 @@
# FIXME: header

try:
    import json
except ImportError:
    import simplejson as json

def jsonify(result, format=False):
    ''' format JSON output (compressed or uncompressed) '''

    if result is None:
        return "{}"
    result2 = result.copy()
    for key, value in result2.items():
        if type(value) is str:
            result2[key] = value.decode('utf-8', 'ignore')

    indent = None
    if format:
        indent = 4

    try:
        return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
    except UnicodeDecodeError:
        return json.dumps(result2, sort_keys=True, indent=indent)
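Expected output, sketched:

    print(jsonify({'changed': False, 'msg': 'ok'}))
    # {"changed": false, "msg": "ok"}

    print(jsonify({'changed': False, 'msg': 'ok'}, format=True))
    # same keys, pretty-printed with a 4-space indent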
@ -19,156 +19,3 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
-
-import json
-import os
-
-from yaml import load, YAMLError
-
-from ansible.errors import AnsibleParserError
-
-from ansible.parsing.vault import VaultLib
-from ansible.parsing.splitter import unquote
-from ansible.parsing.yaml.loader import AnsibleLoader
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
-from ansible.parsing.yaml.strings import YAML_SYNTAX_ERROR
-
-class DataLoader():
-
-    '''
-    The DataLoader class is used to load and parse YAML or JSON content,
-    either from a given file name or from a string that was previously
-    read in through other means. A Vault password can be specified, and
-    any vault-encrypted files will be decrypted.
-
-    Data read from files will also be cached, so the file will never be
-    read from disk more than once.
-
-    Usage:
-
-        dl = DataLoader()
-        (or)
-        dl = DataLoader(vault_password='foo')
-
-        ds = dl.load('...')
-        ds = dl.load_from_file('/path/to/file')
-    '''
-
-    _FILE_CACHE = dict()
-
-    def __init__(self, vault_password=None):
-        self._basedir = '.'
-        self._vault = VaultLib(password=vault_password)
-
-    def load(self, data, file_name='<string>', show_content=True):
-        '''
-        Creates a python datastructure from the given data, which can be either
-        a JSON or YAML string.
-        '''
-
-        try:
-            # we first try to load this data as JSON
-            return json.loads(data)
-        except:
-            try:
-                # if loading JSON failed for any reason, we go ahead
-                # and try to parse it as YAML instead
-                return self._safe_load(data, file_name=file_name)
-            except YAMLError as yaml_exc:
-                self._handle_error(yaml_exc, file_name, show_content)
-
-    def load_from_file(self, file_name):
-        ''' Loads data from a file, which can contain either JSON or YAML. '''
-
-        file_name = self.path_dwim(file_name)
-
-        # if the file has already been read in and cached, we'll
-        # return those results to avoid more file/vault operations
-        if file_name in self._FILE_CACHE:
-            return self._FILE_CACHE[file_name]
-
-        # read the file contents and load the data structure from them
-        (file_data, show_content) = self._get_file_contents(file_name)
-        parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)
-
-        # cache the file contents for next time
-        self._FILE_CACHE[file_name] = parsed_data
-
-        return parsed_data
-
-    def path_exists(self, path):
-        return os.path.exists(path)
-
-    def is_directory(self, path):
-        return os.path.isdir(path)
-
-    def is_file(self, path):
-        return os.path.isfile(path)
-
-    def _safe_load(self, stream, file_name=None):
-        ''' Implements yaml.safe_load(), except using our custom loader class. '''
-
-        loader = AnsibleLoader(stream, file_name)
-        try:
-            return loader.get_single_data()
-        finally:
-            loader.dispose()
-
-    def _get_file_contents(self, file_name):
-        '''
-        Reads the file contents from the given file name, and will decrypt them
-        if they are found to be vault-encrypted.
-        '''
-        if not self.path_exists(file_name) or not self.is_file(file_name):
-            raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name)
-
-        show_content = True
-        try:
-            with open(file_name, 'r') as f:
-                data = f.read()
-                if self._vault.is_encrypted(data):
-                    data = self._vault.decrypt(data)
-                    show_content = False
-            return (data, show_content)
-        except (IOError, OSError) as e:
-            raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))
-
-    def _handle_error(self, yaml_exc, file_name, show_content):
-        '''
-        Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
-        file name/position where a YAML exception occurred, and raises an AnsibleParserError
-        to display the syntax exception information.
-        '''
-
-        # if the YAML exception contains a problem mark, use it to construct
-        # an object the error class can use to display the faulty line
-        err_obj = None
-        if hasattr(yaml_exc, 'problem_mark'):
-            err_obj = AnsibleBaseYAMLObject()
-            err_obj.set_position_info(file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
-
-        raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)
-
-    def get_basedir(self):
-        ''' returns the current basedir '''
-        return self._basedir
-
-    def set_basedir(self, basedir):
-        ''' sets the base directory, used to find files when a relative path is given '''
-
-        if basedir is not None:
-            self._basedir = basedir
-
-    def path_dwim(self, given):
-        '''
-        make relative paths work like folks expect.
-        '''
-
-        given = unquote(given)
-
-        if given.startswith("/"):
-            return os.path.abspath(given)
-        elif given.startswith("~"):
-            return os.path.abspath(os.path.expanduser(given))
-        else:
-            return os.path.abspath(os.path.join(self._basedir, given))
-
@ -22,7 +22,7 @@ __metaclass__ = type
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleParserError
|
from ansible.errors import AnsibleError, AnsibleParserError
|
||||||
-from ansible.parsing.yaml import DataLoader
+from ansible.parsing import DataLoader
 from ansible.playbook.attribute import Attribute, FieldAttribute
 from ansible.playbook.play import Play
 from ansible.plugins import push_basedir

@@ -33,34 +33,33 @@ __all__ = ['Playbook']

 class Playbook:

-    def __init__(self, loader=None):
+    def __init__(self, loader):
         # Entries in the datastructure of a playbook may
         # be either a play or an include statement
         self._entries = []
-        self._basedir = '.'
+        self._basedir = os.getcwd()

-        if loader:
-            self._loader = loader
-        else:
-            self._loader = DataLoader()
+        self._loader = loader

     @staticmethod
-    def load(file_name, loader=None):
+    def load(file_name, variable_manager=None, loader=None):
         pb = Playbook(loader=loader)
-        pb._load_playbook_data(file_name)
+        pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
         return pb

-    def _load_playbook_data(self, file_name):
+    def _load_playbook_data(self, file_name, variable_manager):

-        # add the base directory of the file to the data loader,
-        # so that it knows where to find relatively pathed files
-        basedir = os.path.dirname(file_name)
-        self._loader.set_basedir(basedir)
+        if os.path.isabs(file_name):
+            self._basedir = os.path.dirname(file_name)
+        else:
+            self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))

+        # set the loaders basedir
+        self._loader.set_basedir(self._basedir)

         # also add the basedir to the list of module directories
-        push_basedir(basedir)
+        push_basedir(self._basedir)

-        ds = self._loader.load_from_file(file_name)
+        ds = self._loader.load_from_file(os.path.basename(file_name))
         if not isinstance(ds, list):
             raise AnsibleParserError("playbooks must be a list of plays", obj=ds)

@@ -72,11 +71,14 @@ class Playbook:
                 raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)

             if 'include' in entry:
-                entry_obj = PlaybookInclude.load(entry, loader=self._loader)
+                entry_obj = PlaybookInclude.load(entry, variable_manager=variable_manager, loader=self._loader)
             else:
-                entry_obj = Play.load(entry, loader=self._loader)
+                entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)

             self._entries.append(entry_obj)

+    def get_loader(self):
+        return self._loader
+
     def get_entries(self):
         return self._entries[:]
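A note on the basedir handling above: an absolute playbook path replaces the basedir outright, while a relative one is resolved against whatever basedir was already in effect, and only the basename is then handed to the loader. A minimal standalone sketch of that resolution logic (plain Python, no ansible imports; the function name is illustrative):

    import os

    def resolve_basedir(current_basedir, file_name):
        # an absolute path wins outright; a relative path is resolved
        # against the basedir that was in effect before the call
        if os.path.isabs(file_name):
            return os.path.dirname(file_name)
        return os.path.normpath(os.path.join(current_basedir, os.path.dirname(file_name)))

    # starting from the cwd, "plays/site.yml" yields "<cwd>/plays", after
    # which only os.path.basename("plays/site.yml") goes to the loader
    print(resolve_basedir(os.getcwd(), "plays/site.yml"))
    print(resolve_basedir("/tmp/project", "/etc/ansible/site.yml"))  # "/etc/ansible"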
@@ -21,11 +21,12 @@ __metaclass__ = type

 class Attribute:

-    def __init__(self, isa=None, private=False, default=None):
+    def __init__(self, isa=None, private=False, default=None, required=False):

         self.isa = isa
         self.private = private
         self.default = default
+        self.required = required

 class FieldAttribute(Attribute):
     pass
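The new required flag only carries meaning once something walks the class-level Attribute instances; base.py below does that via self._get_base_attributes(), whose body is not shown in this diff. A guess at how such a walk could work, using inspect.getmembers since base.py already imports it (the helper and class names here are illustrative, not the real implementation):

    from inspect import getmembers

    class Attribute(object):
        def __init__(self, isa=None, private=False, default=None, required=False):
            self.isa = isa
            self.private = private
            self.default = default
            self.required = required

    class FieldAttribute(Attribute):
        pass

    class Example(object):
        # class-level markers; the leading underscore is dropped on collection
        _name = FieldAttribute(isa='string', required=True)
        _tags = FieldAttribute(isa='list', default=[])

    def get_base_attributes(obj):
        # scan the class for Attribute instances and key them by field name
        attrs = {}
        for (name, value) in getmembers(type(obj)):
            if isinstance(value, Attribute):
                attrs[name[1:]] = value
        return attrs

    print(sorted(get_base_attributes(Example())))  # ['name', 'tags']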
@@ -19,22 +19,36 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

+import uuid

 from inspect import getmembers
 from io import FileIO

 from six import iteritems, string_types

+from jinja2.exceptions import UndefinedError

 from ansible.errors import AnsibleParserError
+from ansible.parsing import DataLoader
 from ansible.playbook.attribute import Attribute, FieldAttribute
-from ansible.parsing.yaml import DataLoader
+from ansible.template import Templar
+from ansible.utils.boolean import boolean
+from ansible.utils.debug import debug
+from ansible.template import template

 class Base:

     def __init__(self):

-        # initialize the data loader, this will be provided later
-        # when the object is actually loaded
+        # initialize the data loader and variable manager, which will be provided
+        # later when the object is actually loaded
         self._loader = None
+        self._variable_manager = None
+
+        # every object gets a random uuid:
+        self._uuid = uuid.uuid4()

         # each class knows attributes set upon it, see Task.py for example
         self._attributes = dict()

@@ -60,11 +74,15 @@ class Base:

         return ds

-    def load_data(self, ds, loader=None):
+    def load_data(self, ds, variable_manager=None, loader=None):
         ''' walk the input datastructure and assign any values '''

         assert ds is not None

+        # the variable manager class is used to manage and merge variables
+        # down to a single dictionary for reference in templating, etc.
+        self._variable_manager = variable_manager

         # the data loader class is used to parse data from strings and files
         if loader is not None:
             self._loader = loader

@@ -94,13 +112,24 @@ class Base:
         else:
             self._attributes[name] = ds[name]

-        # return the constructed object
+        # run early, non-critical validation
         self.validate()

+        # cache the datastructure internally
+        self._ds = ds
+
+        # return the constructed object
         return self

+    def get_ds(self):
+        return self._ds
+
     def get_loader(self):
         return self._loader

+    def get_variable_manager(self):
+        return self._variable_manager
+
     def _validate_attributes(self, ds):
         '''
         Ensures that there are no keys in the datastructure which do

@@ -112,7 +141,7 @@ class Base:
             if key not in valid_attrs:
                 raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)

-    def validate(self):
+    def validate(self, all_vars=dict()):
         ''' validation that is done at parse time, not load time '''

         # walk all fields in the object

@@ -121,16 +150,111 @@ class Base:
             # run validator only if present
             method = getattr(self, '_validate_%s' % name, None)
             if method:
-                method(self, attribute)
+                method(attribute, name, getattr(self, name))

-    def post_validate(self, runner_context):
+    def copy(self):
+        '''
+        Create a copy of this object and return it.
+        '''
+
+        new_me = self.__class__()
+
+        for (name, attribute) in iteritems(self._get_base_attributes()):
+            setattr(new_me, name, getattr(self, name))
+
+        new_me._loader = self._loader
+        new_me._variable_manager = self._variable_manager
+
+        return new_me
+
+    def post_validate(self, all_vars=dict(), ignore_undefined=False):
         '''
         we can't tell that everything is of the right type until we have
         all the variables. Run basic types (from isa) as well as
         any _post_validate_<foo> functions.
         '''

-        raise exception.NotImplementedError
+        basedir = None
+        if self._loader is not None:
+            basedir = self._loader.get_basedir()
+
+        templar = Templar(basedir=basedir, variables=all_vars)
+
+        for (name, attribute) in iteritems(self._get_base_attributes()):
+
+            if getattr(self, name) is None:
+                if not attribute.required:
+                    continue
+                else:
+                    raise AnsibleParserError("the field '%s' is required but was not set" % name)
+
+            try:
+                # if the attribute contains a variable, template it now
+                value = templar.template(getattr(self, name))
+
+                # run the post-validator if present
+                method = getattr(self, '_post_validate_%s' % name, None)
+                if method:
+                    method(self, attribute, value)
+                else:
+                    # otherwise, just make sure the attribute is of the type it should be
+                    if attribute.isa == 'string':
+                        value = unicode(value)
+                    elif attribute.isa == 'int':
+                        value = int(value)
+                    elif attribute.isa == 'bool':
+                        value = boolean(value)
+                    elif attribute.isa == 'list':
+                        if not isinstance(value, list):
+                            value = [ value ]
+                    elif attribute.isa == 'dict' and not isinstance(value, dict):
+                        raise TypeError()
+
+                # and assign the massaged value back to the attribute field
+                setattr(self, name, value)
+
+            except (TypeError, ValueError), e:
+                #raise AnsibleParserError("the field '%s' has an invalid value, and could not be converted to an %s" % (name, attribute.isa), obj=self.get_ds())
+                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e))
+            except UndefinedError:
+                if not ignore_undefined:
+                    raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined" % (name,))
+
+    def serialize(self):
+        '''
+        Serializes the object derived from the base object into
+        a dictionary of values. This only serializes the field
+        attributes for the object, so this may need to be overridden
+        for any classes which wish to add additional items not stored
+        as field attributes.
+        '''
+
+        debug("starting serialization of %s" % self.__class__.__name__)
+        repr = dict()
+
+        for (name, attribute) in iteritems(self._get_base_attributes()):
+            repr[name] = getattr(self, name)
+
+        debug("done serializing %s" % self.__class__.__name__)
+        return repr
+
+    def deserialize(self, data):
+        '''
+        Given a dictionary of values, load up the field attributes for
+        this object. As with serialize(), if there are any non-field
+        attribute data members, this method will need to be overridden
+        and extended.
+        '''
+
+        debug("starting deserialization of %s" % self.__class__.__name__)
+        assert isinstance(data, dict)
+
+        for (name, attribute) in iteritems(self._get_base_attributes()):
+            if name in data:
+                setattr(self, name, data[name])
+            else:
+                setattr(self, name, attribute.default)
+        debug("done deserializing %s" % self.__class__.__name__)

     def __getattr__(self, needle):

@@ -146,3 +270,11 @@ class Base:
             return self._attributes[needle]

         raise AttributeError("attribute not found: %s" % needle)
+
+    def __getstate__(self):
+        return self.serialize()
+
+    def __setstate__(self, data):
+        self.__init__()
+        self.deserialize(data)
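The __getstate__/__setstate__ pair at the end routes pickling through serialize()/deserialize(), so only field attributes cross process boundaries while unpicklable runtime state (the loader, the variable manager) is rebuilt by __init__. A self-contained sketch of the pattern (a stand-in class, not the real Base):

    import pickle

    class Base(object):
        _fields = ('name', 'tags')

        def __init__(self):
            self._loader = lambda: None  # stands in for unpicklable runtime state
            self.name = None
            self.tags = []

        def serialize(self):
            # only the declared fields are captured, never _loader
            return dict((f, getattr(self, f)) for f in self._fields)

        def deserialize(self, data):
            for f in self._fields:
                setattr(self, f, data.get(f))

        def __getstate__(self):
            return self.serialize()

        def __setstate__(self, data):
            self.__init__()         # rebuild runtime state first
            self.deserialize(data)  # then restore the fields

    b = Base()
    b.name = 'demo'
    restored = pickle.loads(pickle.dumps(b))
    print(restored.name)  # 'demo'; the lambda was recreated, not pickled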
@@ -21,25 +21,28 @@ __metaclass__ = type

 from ansible.playbook.attribute import Attribute, FieldAttribute
 from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
 from ansible.playbook.helpers import load_list_of_tasks
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
 from ansible.playbook.task_include import TaskInclude

-class Block(Base):
+class Block(Base, Conditional, Taggable):

     _block = FieldAttribute(isa='list')
     _rescue = FieldAttribute(isa='list')
     _always = FieldAttribute(isa='list')
-    _tags = FieldAttribute(isa='list', default=[])
-    _when = FieldAttribute(isa='list', default=[])

     # for future consideration? this would be functionally
     # similar to the 'else' clause for exceptions
     #_otherwise = FieldAttribute(isa='list')

-    def __init__(self, parent_block=None, role=None, task_include=None):
+    def __init__(self, parent_block=None, role=None, task_include=None, use_handlers=False):
         self._parent_block = parent_block
         self._role = role
         self._task_include = task_include
+        self._use_handlers = use_handlers

         super(Block, self).__init__()

     def get_variables(self):

@@ -48,9 +51,9 @@ class Block(Base):
         return dict()

     @staticmethod
-    def load(data, parent_block=None, role=None, task_include=None, loader=None):
-        b = Block(parent_block=parent_block, role=role, task_include=task_include)
-        return b.load_data(data, loader=loader)
+    def load(data, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+        b = Block(parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers)
+        return b.load_data(data, variable_manager=variable_manager, loader=loader)

     def munge(self, ds):
         '''

@@ -70,17 +73,17 @@ class Block(Base):
         return ds

     def _load_block(self, attr, ds):
-        return load_list_of_tasks(ds, block=self, loader=self._loader)
+        return load_list_of_tasks(ds, block=self, role=self._role, variable_manager=self._variable_manager, loader=self._loader, use_handlers=self._use_handlers)

     def _load_rescue(self, attr, ds):
-        return load_list_of_tasks(ds, block=self, loader=self._loader)
+        return load_list_of_tasks(ds, block=self, role=self._role, variable_manager=self._variable_manager, loader=self._loader, use_handlers=self._use_handlers)

     def _load_always(self, attr, ds):
-        return load_list_of_tasks(ds, block=self, loader=self._loader)
+        return load_list_of_tasks(ds, block=self, role=self._role, variable_manager=self._variable_manager, loader=self._loader, use_handlers=self._use_handlers)

     # not currently used
     #def _load_otherwise(self, attr, ds):
-    #    return self._load_list_of_tasks(ds, block=self, loader=self._loader)
+    #    return self._load_list_of_tasks(ds, block=self, role=self._role, variable_manager=self._variable_manager, loader=self._loader, use_handlers=self._use_handlers)

     def compile(self):
         '''

@@ -93,3 +96,75 @@ class Block(Base):
             task_list.extend(task.compile())

         return task_list
+
+    def copy(self):
+        new_me = super(Block, self).copy()
+        new_me._use_handlers = self._use_handlers
+
+        new_me._parent_block = None
+        if self._parent_block:
+            new_me._parent_block = self._parent_block.copy()
+
+        new_me._role = None
+        if self._role:
+            new_me._role = self._role
+
+        new_me._task_include = None
+        if self._task_include:
+            new_me._task_include = self._task_include.copy()
+
+        return new_me
+
+    def serialize(self):
+        '''
+        Override of the default serialize method, since when we're serializing
+        a task we don't want to include the attribute list of tasks.
+        '''
+
+        data = dict(when=self.when)
+
+        if self._role is not None:
+            data['role'] = self._role.serialize()
+
+        return data
+
+    def deserialize(self, data):
+        '''
+        Override of the default deserialize method, to match the above overridden
+        serialize method
+        '''
+
+        # unpack the when attribute, which is the only one we want
+        self.when = data.get('when')
+
+        # if there was a serialized role, unpack it too
+        role_data = data.get('role')
+        if role_data:
+            r = Role()
+            r.deserialize(role_data)
+            self._role = r
+
+    def evaluate_conditional(self, all_vars):
+        if self._parent_block is not None:
+            if not self._parent_block.evaluate_conditional(all_vars):
+                return False
+        if self._role is not None:
+            if not self._role.evaluate_conditional(all_vars):
+                return False
+        return super(Block, self).evaluate_conditional(all_vars)
+
+    def get_tags(self):
+        tags = set(self.tags[:])
+        if self._parent_block:
+            tags.update(self._parent_block.get_tags())
+        if self._role:
+            tags.update(self._role.get_tags())
+        return tags
+
+    #def get_conditionals(self):
+    #    conditionals = set(self.when[:])
+    #    if self._parent_block:
+    #        conditionals.update(self._parent_block.get_conditionals())
+    #    if self._role:
+    #        conditionals.update(self._role.get_conditionals())
+    #    return conditionals
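evaluate_conditional above gives blocks a veto chain: the parent block is consulted first, then the role, and only then the block's own when list (via the Conditional mix-in that follows). A toy sketch of that chain, with plain callables standing in for Jinja2 conditionals:

    class Node(object):
        def __init__(self, when=None, parent=None):
            self.when = when or []  # callables taking the vars dict
            self.parent = parent

        def evaluate_conditional(self, all_vars):
            # any ancestor that fails vetoes the whole chain
            if self.parent is not None:
                if not self.parent.evaluate_conditional(all_vars):
                    return False
            return all(cond(all_vars) for cond in self.when)

    outer = Node(when=[lambda v: v.get('enabled', False)])
    inner = Node(when=[lambda v: v.get('os') == 'linux'], parent=outer)

    print(inner.evaluate_conditional({'enabled': True, 'os': 'linux'}))   # True
    print(inner.evaluate_conditional({'enabled': False, 'os': 'linux'}))  # False: parent vetoes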
@@ -19,16 +19,79 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

+from ansible.errors import *
+from ansible.playbook.attribute import FieldAttribute
+from ansible.template import Templar

 class Conditional:

-    def __init__(self, task):
-        self._task = task
-        self._conditionals = []
+    '''
+    This is a mix-in class, to be used with Base to allow the object
+    to be run conditionally when a condition is met or skipped.
+    '''

-    def evaluate(self, context):
-        pass
+    _when = FieldAttribute(isa='list', default=[])

+    def __init__(self):
+        super(Conditional, self).__init__()
+
+    def _validate_when(self, attr, name, value):
+        if not isinstance(value, list):
+            setattr(self, name, [ value ])
+
+    def evaluate_conditional(self, all_vars):
+        '''
+        Loops through the conditionals set on this object, returning
+        False if any of them evaluate as such.
+        '''
+
+        templar = Templar(variables=all_vars)
+        for conditional in self.when:
+            if not self._check_conditional(conditional, templar):
+                return False
+        return True
+
+    def _check_conditional(self, conditional, templar):
+        '''
+        This method does the low-level evaluation of each conditional
+        set on this object, using jinja2 to wrap the conditionals for
+        evaluation.
+        '''
+
+        if conditional is None or conditional == '':
+            return True
+        elif not isinstance(conditional, basestring):
+            return conditional
+
+        conditional = conditional.replace("jinja2_compare ","")
+
+        # allow variable names
+        #if conditional in inject and '-' not in str(inject[conditional]):
+        #    conditional = inject[conditional]
+
+        conditional = templar.template(conditional, convert_bare=True)
+        original = str(conditional).replace("jinja2_compare ","")
+
+        # a Jinja2 evaluation that results in something Python can eval!
+        presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
+        conditional = templar.template(presented)
+
+        val = conditional.strip()
+        if val == presented:
+            # the templating failed, meaning most likely a
+            # variable was undefined. If we happened to be
+            # looking for an undefined variable, return True,
+            # otherwise fail
+            if "is undefined" in conditional:
+                return True
+            elif "is defined" in conditional:
+                return False
+            else:
+                raise AnsibleError("error while evaluating conditional: %s" % original)
+        elif val == "True":
+            return True
+        elif val == "False":
+            return False
+        else:
+            raise AnsibleError("unable to evaluate conditional: %s" % original)

-    def push(self, conditionals):
-        if not isinstance(conditionals, list):
-            conditionals = [ conditionals ]
-        self._conditionals.extend(conditionals)
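The core trick in _check_conditional is wrapping an arbitrary expression in {% if %} True {% else %} False {% endif %} so that rendering produces a comparable string, and a render that comes back unchanged signals an undefined variable. A standalone sketch using stock jinja2 rather than the Templar wrapper (no ansible imports; error handling reduced to a ValueError):

    from jinja2 import Environment

    def check_conditional(conditional, variables):
        env = Environment()
        # wrap the expression so rendering yields a plain "True"/"False"
        presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
        val = env.from_string(presented).render(**variables).strip()
        if val == "True":
            return True
        elif val == "False":
            return False
        raise ValueError("unable to evaluate conditional: %s" % conditional)

    print(check_conditional("x > 3", {'x': 5}))        # True
    print(check_conditional("name is defined", {}))    # False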
@@ -19,22 +19,35 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

-from v2.errors import AnsibleError
-from v2.inventory import Host
-from v2.playbook import Task
+from ansible.errors import AnsibleError
+#from ansible.inventory.host import Host
+from ansible.playbook.task import Task

 class Handler(Task):

-    def __init__(self):
-        pass
+    def __init__(self, block=None, role=None, task_include=None):
+        self._flagged_hosts = []
+
+        super(Handler, self).__init__(block=block, role=role, task_include=task_include)
+
+    def __repr__(self):
+        ''' returns a human readable representation of the handler '''
+        return "HANDLER: %s" % self.get_name()
+
+    @staticmethod
+    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+        t = Handler(block=block, role=role, task_include=task_include)
+        return t.load_data(data, variable_manager=variable_manager, loader=loader)

     def flag_for_host(self, host):
-        assert instanceof(host, Host)
-        pass
+        #assert instanceof(host, Host)
+        if host not in self._flagged_hosts:
+            self._flagged_hosts.append(host)

-    def has_triggered(self):
-        return self._triggered
+    def has_triggered(self, host):
+        return host in self._flagged_hosts

-    def set_triggered(self, triggered):
-        assert instanceof(triggered, bool)
-        self._triggered = triggered
+    def serialize(self):
+        result = super(Handler, self).serialize()
+        result['is_handler'] = True
+        return result
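The rewrite above replaces the old single _triggered boolean with a per-host list, so one Handler object can track notification state for every host in the play, and double notification stays idempotent. A trimmed sketch of just that bookkeeping:

    class Handler(object):
        def __init__(self, name):
            self.name = name
            self._flagged_hosts = []

        def flag_for_host(self, host):
            # idempotent: notifying twice still runs the handler once per host
            if host not in self._flagged_hosts:
                self._flagged_hosts.append(host)

        def has_triggered(self, host):
            return host in self._flagged_hosts

    h = Handler('restart nginx')
    h.flag_for_host('web01')
    h.flag_for_host('web01')
    print(h._flagged_hosts)          # ['web01']
    print(h.has_triggered('web02'))  # False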
@@ -24,7 +24,7 @@ from ansible.errors import AnsibleParserError
 from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject

-def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, loader=None):
+def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
     '''
     Given a list of mixed task/block data (parsed from YAML),
     return a list of Block() objects, where implicit blocks

@@ -39,19 +39,28 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, loader=None):
     block_list = []
     if ds:
         for block in ds:
-            b = Block.load(block, parent_block=parent_block, role=role, task_include=task_include, loader=loader)
+            b = Block.load(
+                block,
+                parent_block=parent_block,
+                role=role,
+                task_include=task_include,
+                use_handlers=use_handlers,
+                variable_manager=variable_manager,
+                loader=loader
+            )
             block_list.append(b)

     return block_list

-def load_list_of_tasks(ds, block=None, role=None, task_include=None, loader=None):
+def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
     '''
     Given a list of task datastructures (parsed from YAML),
     return a list of Task() or TaskInclude() objects.
     '''

     # we import here to prevent a circular dependency with imports
+    from ansible.playbook.handler import Handler
     from ansible.playbook.task import Task
     from ansible.playbook.task_include import TaskInclude

@@ -70,19 +79,29 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, loader=None):
                 cur_basedir = loader.get_basedir()
                 loader.set_basedir(new_basedir)

-            t = TaskInclude.load(task, block=block, role=role, task_include=task_include, loader=loader)
+            t = TaskInclude.load(
+                task,
+                block=block,
+                role=role,
+                task_include=task_include,
+                use_handlers=use_handlers,
+                loader=loader
+            )

             if cur_basedir and loader:
                 loader.set_basedir(cur_basedir)
         else:
-            t = Task.load(task, block=block, role=role, task_include=task_include, loader=loader)
+            if use_handlers:
+                t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+            else:
+                t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

         task_list.append(t)

     return task_list

-def load_list_of_roles(ds, loader=None):
+def load_list_of_roles(ds, variable_manager=None, loader=None):
     '''
     Loads and returns a list of RoleInclude objects from the datastructure
     list of role definitions

@@ -95,7 +114,7 @@ def load_list_of_roles(ds, loader=None):
     roles = []
     for role_def in ds:
-        i = RoleInclude.load(role_def, loader=loader)
+        i = RoleInclude.load(role_def, variable_manager=variable_manager, loader=loader)
         roles.append(i)

     return roles
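Two details worth noting in the helpers hunks: the Handler import happens inside the function body, like the existing Task and TaskInclude imports, to sidestep the circular dependency the comment calls out; and the use_handlers flag simply switches which class loads each datastructure, which works because Handler subclasses Task. A stripped-down sketch of that dispatch (stand-in classes with far fewer arguments than the real loaders):

    class Task(object):
        @staticmethod
        def load(data):
            return 'Task(%s)' % data

    class Handler(Task):
        @staticmethod
        def load(data):
            return 'Handler(%s)' % data

    def load_list_of_tasks(ds, use_handlers=False):
        # the same list loader serves both paths; only the class differs
        cls = Handler if use_handlers else Task
        return [cls.load(item) for item in ds]

    print(load_list_of_tasks(['restart nginx'], use_handlers=True))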
@@ -21,18 +21,19 @@ __metaclass__ = type

 from ansible.errors import AnsibleError, AnsibleParserError

-from ansible.parsing.yaml import DataLoader

 from ansible.playbook.attribute import Attribute, FieldAttribute
 from ansible.playbook.base import Base
 from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list
 from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.utils.vars import combine_vars

 __all__ = ['Play']

-class Play(Base):
+class Play(Base, Taggable):

     """
     A play is a language feature that represents a list of roles and/or

@@ -51,15 +52,16 @@ class Play(Base):
     _accelerate_port = FieldAttribute(isa='int', default=5099)
     _connection = FieldAttribute(isa='string', default='smart')
     _gather_facts = FieldAttribute(isa='string', default='smart')
-    _hosts = FieldAttribute(isa='list', default=[])
+    _hosts = FieldAttribute(isa='list', default=[], required=True)
     _name = FieldAttribute(isa='string', default='<no name specified>')
     _port = FieldAttribute(isa='int', default=22)
     _remote_user = FieldAttribute(isa='string', default='root')
     _su = FieldAttribute(isa='bool', default=False)
     _su_user = FieldAttribute(isa='string', default='root')
+    _su_pass = FieldAttribute(isa='string')
     _sudo = FieldAttribute(isa='bool', default=False)
     _sudo_user = FieldAttribute(isa='string', default='root')
-    _tags = FieldAttribute(isa='list', default=[])
+    _sudo_pass = FieldAttribute(isa='string')

     # Variable Attributes
     _vars = FieldAttribute(isa='dict', default=dict())

@@ -81,6 +83,7 @@ class Play(Base):
     _max_fail_percentage = FieldAttribute(isa='string', default='0')
     _no_log = FieldAttribute(isa='bool', default=False)
     _serial = FieldAttribute(isa='int', default=0)
+    _strategy = FieldAttribute(isa='string', default='linear')

     # =================================================================================

@@ -95,9 +98,9 @@ class Play(Base):
         return "PLAY: %s" % self._attributes.get('name')

     @staticmethod
-    def load(data, loader=None):
+    def load(data, variable_manager=None, loader=None):
         p = Play()
-        return p.load_data(data, loader=loader)
+        return p.load_data(data, variable_manager=variable_manager, loader=loader)

     def munge(self, ds):
         '''

@@ -120,40 +123,68 @@ class Play(Base):

         return ds

+    def _load_vars(self, attr, ds):
+        '''
+        Vars in a play can be specified either as a dictionary directly, or
+        as a list of dictionaries. If the later, this method will turn the
+        list into a single dictionary.
+        '''
+
+        try:
+            if isinstance(ds, dict):
+                return ds
+            elif isinstance(ds, list):
+                all_vars = dict()
+                for item in ds:
+                    if not isinstance(item, dict):
+                        raise ValueError
+                    all_vars = combine_vars(all_vars, item)
+                return all_vars
+            else:
+                raise ValueError
+        except ValueError:
+            raise AnsibleParsingError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds)
+
     def _load_tasks(self, attr, ds):
         '''
         Loads a list of blocks from a list which may be mixed tasks/blocks.
         Bare tasks outside of a block are given an implicit block.
         '''
-        return load_list_of_blocks(ds, loader=self._loader)
+        return load_list_of_blocks(ds, variable_manager=self._variable_manager, loader=self._loader)

     def _load_pre_tasks(self, attr, ds):
         '''
         Loads a list of blocks from a list which may be mixed tasks/blocks.
         Bare tasks outside of a block are given an implicit block.
         '''
-        return load_list_of_blocks(ds, loader=self._loader)
+        return load_list_of_blocks(ds, variable_manager=self._variable_manager, loader=self._loader)

     def _load_post_tasks(self, attr, ds):
         '''
         Loads a list of blocks from a list which may be mixed tasks/blocks.
         Bare tasks outside of a block are given an implicit block.
         '''
-        return load_list_of_blocks(ds, loader=self._loader)
+        return load_list_of_blocks(ds, variable_manager=self._variable_manager, loader=self._loader)

     def _load_handlers(self, attr, ds):
         '''
         Loads a list of blocks from a list which may be mixed handlers/blocks.
         Bare handlers outside of a block are given an implicit block.
         '''
-        return load_list_of_blocks(ds, loader=self._loader)
+        return load_list_of_blocks(ds, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)

     def _load_roles(self, attr, ds):
         '''
         Loads and returns a list of RoleInclude objects from the datastructure
-        list of role definitions
+        list of role definitions and creates the Role from those objects
         '''
-        return load_list_of_roles(ds, loader=self._loader)
+
+        role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader)
+
+        roles = []
+        for ri in role_includes:
+            roles.append(Role.load(ri))
+        return roles

     # FIXME: post_validation needs to ensure that su/sudo are not both set

@@ -169,13 +200,8 @@ class Play(Base):
         task_list = []

         if len(self.roles) > 0:
-            for ri in self.roles:
-                # The internal list of roles are actually RoleInclude objects,
-                # so we load the role from that now
-                role = Role.load(ri)
-
-                # FIXME: evauluate conditional of roles here?
-                task_list.extend(role.compile())
+            for r in self.roles:
+                task_list.extend(r.compile())

         return task_list

@@ -194,3 +220,40 @@ class Play(Base):
         task_list.extend(compile_block_list(self.post_tasks))

         return task_list
+
+    def get_vars(self):
+        return self.vars.copy()
+
+    def get_vars_files(self):
+        return self.vars_files
+
+    def get_handlers(self):
+        return self.handlers[:]
+
+    def get_roles(self):
+        return self.roles[:]
+
+    def serialize(self):
+        data = super(Play, self).serialize()
+
+        roles = []
+        for role in self.get_roles():
+            roles.append(role.serialize())
+        data['roles'] = roles
+
+        return data
+
+    def deserialize(self, data):
+        super(Play, self).deserialize(data)
+
+        if 'roles' in data:
+            role_data = data.get('roles', [])
+            roles = []
+            for role in role_data:
+                r = Role()
+                r.deserialize(role)
+                roles.append(r)
+
+            setattr(self, 'roles', roles)
+            del data['roles']
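The new _load_vars accepts play vars either as a single dictionary or as a list of dictionaries folded down with combine_vars, later entries winning. (AnsibleParsingError in that hunk looks like a typo for AnsibleParserError.) A sketch of the fold, with a stand-in for ansible.utils.vars.combine_vars under its default, non-merging behavior:

    def combine_vars(a, b):
        # stand-in for combine_vars in replace mode: keys from b win over a
        result = a.copy()
        result.update(b)
        return result

    def load_vars(ds):
        if isinstance(ds, dict):
            return ds
        elif isinstance(ds, list):
            all_vars = dict()
            for item in ds:
                if not isinstance(item, dict):
                    raise ValueError("vars must be dictionaries")
                all_vars = combine_vars(all_vars, item)
            return all_vars
        raise ValueError("vars must be a dict or a list of dicts")

    print(load_vars([{'a': 1}, {'a': 2, 'b': 3}]))  # {'a': 2, 'b': 3}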
@@ -27,12 +27,15 @@ from hashlib import sha1
 from types import NoneType

 from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.parsing.yaml import DataLoader
+from ansible.parsing import DataLoader
 from ansible.playbook.attribute import FieldAttribute
 from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
 from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
 from ansible.playbook.role.include import RoleInclude
 from ansible.playbook.role.metadata import RoleMetadata
+from ansible.playbook.taggable import Taggable
+from ansible.utils.vars import combine_vars

 __all__ = ['Role', 'ROLE_CACHE']

@@ -45,7 +48,7 @@ __all__ = ['Role', 'ROLE_CACHE']
 ROLE_CACHE = dict()

-class Role:
+class Role(Base, Conditional, Taggable):

     def __init__(self):
         self._role_name = None

@@ -60,6 +63,10 @@ class Role:
         self._handler_blocks = []
         self._default_vars = dict()
         self._role_vars = dict()
+        self._had_task_run = False
+        self._completed = False
+
+        super(Role, self).__init__()

     def __repr__(self):
         return self.get_name()

@@ -71,25 +78,53 @@ class Role:
     def load(role_include, parent_role=None):
         # FIXME: add back in the role caching support
         try:
+            # The ROLE_CACHE is a dictionary of role names, with each entry
+            # containing another dictionary corresponding to a set of parameters
+            # specified for a role as the key and the Role() object itself.
+            # We use frozenset to make the dictionary hashable.
+
+            hashed_params = frozenset(role_include.get_role_params().iteritems())
+            if role_include.role in ROLE_CACHE:
+                for (entry, role_obj) in ROLE_CACHE[role_include.role].iteritems():
+                    if hashed_params == entry:
+                        if parent_role:
+                            role_obj.add_parent(parent_role)
+                        return role_obj

             r = Role()
             r._load_role_data(role_include, parent_role=parent_role)

+            if role_include.role not in ROLE_CACHE:
+                ROLE_CACHE[role_include.role] = dict()
+
+            ROLE_CACHE[role_include.role][hashed_params] = r
+            return r

         except RuntimeError:
             # FIXME: needs a better way to access the ds in the role include
             raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles", obj=role_include._ds)
-        return r

     def _load_role_data(self, role_include, parent_role=None):
         self._role_name = role_include.role
         self._role_path = role_include.get_role_path()
         self._role_params = role_include.get_role_params()
+        self._variable_manager = role_include.get_variable_manager()
         self._loader = role_include.get_loader()

         if parent_role:
             self.add_parent(parent_role)

+        current_when = getattr(self, 'when')[:]
+        current_when.extend(role_include.when)
+        setattr(self, 'when', current_when)
+
+        current_tags = getattr(self, 'tags')[:]
+        current_tags.extend(role_include.tags)
+        setattr(self, 'tags', current_tags)

         # save the current base directory for the loader and set it to the current role path
-        cur_basedir = self._loader.get_basedir()
-        self._loader.set_basedir(self._role_path)
+        #cur_basedir = self._loader.get_basedir()
+        #self._loader.set_basedir(self._role_path)

         # load the role's files, if they exist
         metadata = self._load_role_yaml('meta')

@@ -109,13 +144,17 @@ class Role:
         self._role_vars = self._load_role_yaml('vars')
         if not isinstance(self._role_vars, (dict, NoneType)):
             raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds)
+        elif self._role_vars is None:
+            self._role_vars = dict()

         self._default_vars = self._load_role_yaml('defaults')
         if not isinstance(self._default_vars, (dict, NoneType)):
             raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds)
+        elif self._default_vars is None:
+            self._default_vars = dict()

         # and finally restore the previous base directory
-        self._loader.set_basedir(cur_basedir)
+        #self._loader.set_basedir(cur_basedir)

     def _load_role_yaml(self, subdir):
         file_path = os.path.join(self._role_path, subdir)

@@ -169,29 +208,62 @@ class Role:
     def get_parents(self):
         return self._parents

-    # FIXME: not yet used
-    #def get_variables(self):
-    #    # returns the merged variables for this role, including
-    #    # recursively merging those of all child roles
-    #    return dict()
+    def get_default_vars(self):
+        # FIXME: get these from dependent roles too
+        default_vars = dict()
+        for dep in self.get_all_dependencies():
+            default_vars = combine_vars(default_vars, dep.get_default_vars())
+        default_vars = combine_vars(default_vars, self._default_vars)
+        return default_vars
+
+    def get_inherited_vars(self):
+        inherited_vars = dict()
+        for parent in self._parents:
+            inherited_vars = combine_vars(inherited_vars, parent.get_inherited_vars())
+            inherited_vars = combine_vars(inherited_vars, parent._role_vars)
+            inherited_vars = combine_vars(inherited_vars, parent._role_params)
+        return inherited_vars
+
+    def get_vars(self):
+        all_vars = self.get_inherited_vars()
+
+        for dep in self.get_all_dependencies():
+            all_vars = combine_vars(all_vars, dep.get_vars())
+
+        all_vars = combine_vars(all_vars, self._role_vars)
+        all_vars = combine_vars(all_vars, self._role_params)
+
+        return all_vars
+
+    def get_tags(self):
+        tags = set(self.tags[:])
+        for parent in self._parents:
+            tags.update(parent.get_tags())
+        return tags
+
+    #def get_conditionals(self):
+    #    conditionals = set(self.when[:])
+    #    for parent in self._parents:
+    #        conditionals.update(parent.get_conditionals())
+    #    return conditionals

     def get_direct_dependencies(self):
         return self._dependencies[:]

     def get_all_dependencies(self):
-        # returns a list built recursively, of all deps from
-        # all child dependencies
+        '''
+        Returns a list of all deps, built recursively from all child dependencies,
+        in the proper order in which they should be executed or evaluated.
+        '''

         child_deps = []
-        direct_deps = self.get_direct_dependencies()

-        for dep in direct_deps:
-            dep_deps = dep.get_all_dependencies()
-            for dep_dep in dep_deps:
-                if dep_dep not in child_deps:
-                    child_deps.append(dep_dep)
+        for dep in self.get_direct_dependencies():
+            for child_dep in dep.get_all_dependencies():
+                child_deps.append(child_dep)
+            child_deps.append(dep)

-        return direct_deps + child_deps
+        return child_deps

     def get_task_blocks(self):
         return self._task_blocks[:]

@@ -199,6 +271,14 @@ class Role:
     def get_handler_blocks(self):
         return self._handler_blocks[:]

+    def has_run(self):
+        '''
+        Returns true if this role has been iterated over completely and
+        at least one task was run
+        '''
+
+        return self._had_task_run and self._completed
+
     def compile(self):
         '''
         Returns the task list for this role, which is created by first

@@ -216,3 +296,75 @@ class Role:

         return task_list
+
+    def serialize(self, include_deps=True):
+        res = super(Role, self).serialize()
+
+        res['_role_name'] = self._role_name
+        res['_role_path'] = self._role_path
+        res['_role_vars'] = self._role_vars
+        res['_role_params'] = self._role_params
+        res['_default_vars'] = self._default_vars
+        res['_had_task_run'] = self._had_task_run
+        res['_completed'] = self._completed
+
+        if self._metadata:
+            res['_metadata'] = self._metadata.serialize()
+
+        if include_deps:
+            deps = []
+            for role in self.get_direct_dependencies():
+                deps.append(role.serialize())
+            res['_dependencies'] = deps
+
+        parents = []
+        for parent in self._parents:
+            parents.append(parent.serialize(include_deps=False))
+        res['_parents'] = parents
+
+        return res
+
+    def deserialize(self, data, include_deps=True):
+        self._role_name = data.get('_role_name', '')
+        self._role_path = data.get('_role_path', '')
+        self._role_vars = data.get('_role_vars', dict())
+        self._role_params = data.get('_role_params', dict())
+        self._default_vars = data.get('_default_vars', dict())
+        self._had_task_run = data.get('_had_task_run', False)
+        self._completed = data.get('_completed', False)
+
+        if include_deps:
+            deps = []
+            for dep in data.get('_dependencies', []):
+                r = Role()
+                r.deserialize(dep)
+                deps.append(r)
+            setattr(self, '_dependencies', deps)
+
+        parent_data = data.get('_parents', [])
+        parents = []
+        for parent in parent_data:
+            r = Role()
+            r.deserialize(parent, include_deps=False)
+            parents.append(r)
+        setattr(self, '_parents', parents)
+
+        metadata_data = data.get('_metadata')
+        if metadata_data:
+            m = RoleMetadata()
+            m.deserialize(metadata_data)
+            self._metadata = m
+
+        super(Role, self).deserialize(data)
+
+    def evaluate_conditional(self, all_vars):
+        parent_conditionals = True
+        if len(self._parents) > 0:
+            parent_conditionals = False
+            for parent in self._parents:
+                parent_conditionals |= parent.evaluate_conditional(all_vars)
+
+        if not parent_conditionals:
+            return False
+        else:
+            return super(Role, self).evaluate_conditional(all_vars)
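The restored role cache keys each entry on frozenset(params.iteritems()): a dict cannot serve as a dictionary key, but a frozenset of its items can, so identical role invocations share one Role object while different parameters yield distinct ones. A sketch of that keying (parameter values must themselves be hashable for this to work; names here are illustrative):

    ROLE_CACHE = {}

    def load_role(name, params, factory):
        # frozenset of items makes the (unhashable) params dict usable as a key
        hashed_params = frozenset(params.items())
        per_role = ROLE_CACHE.setdefault(name, {})
        if hashed_params not in per_role:
            per_role[hashed_params] = factory(name, params)
        return per_role[hashed_params]

    factory = lambda n, p: object()
    r1 = load_role('common', {'port': 80}, factory)
    r2 = load_role('common', {'port': 80}, factory)
    r3 = load_role('common', {'port': 443}, factory)
    print(r1 is r2, r1 is r3)  # True False

The reworked get_all_dependencies is also worth a second look: by appending each dependency after its own children it produces a post-order, depth-first list, so the deepest dependencies come first in execution order.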
@@ -27,12 +27,14 @@ from ansible.errors import AnsibleError
 from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
 from ansible.playbook.attribute import Attribute, FieldAttribute
 from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.taggable import Taggable

 __all__ = ['RoleDefinition']

-class RoleDefinition(Base):
+class RoleDefinition(Base, Conditional, Taggable):

     _role = FieldAttribute(isa='string')

@@ -45,7 +47,7 @@ class RoleDefinition(Base):
         return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')

     @staticmethod
-    def load(data, loader=None):
+    def load(data, variable_manager=None, loader=None):
         raise AnsibleError("not implemented")

     def munge(self, ds):

@@ -116,7 +118,7 @@ class RoleDefinition(Base):
             return (role_name, role_path)
         else:
             # FIXME: this should search in the configured roles path
-            for path in ('./roles', '/etc/ansible/roles'):
+            for path in (os.path.join(self._loader.get_basedir(), 'roles'), './roles', '/etc/ansible/roles'):
                 role_path = os.path.join(path, role_name)
                 if self._loader.path_exists(role_path):
                     return (role_name, role_path)
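The role path search above now probes the playbook's own roles/ directory (via the loader's basedir) before the legacy locations, first match winning. A standalone sketch of the probe order, with os.path.exists standing in for the loader's path_exists:

    import os

    def find_role_path(role_name, basedir):
        # candidate locations, probed in order; the first that exists wins
        search_paths = (
            os.path.join(basedir, 'roles'),
            './roles',
            '/etc/ansible/roles',
        )
        for path in search_paths:
            role_path = os.path.join(path, role_name)
            if os.path.exists(role_path):
                return (role_name, role_path)
        return (role_name, None)

    print(find_role_path('common', '/tmp/project'))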
@@ -37,16 +37,13 @@ class RoleInclude(RoleDefinition):
     FIXME: docstring
     """

-    _tags = FieldAttribute(isa='list', default=[])
-    _when = FieldAttribute(isa='list', default=[])

     def __init__(self):
         super(RoleInclude, self).__init__()

     @staticmethod
-    def load(data, parent_role=None, loader=None):
+    def load(data, parent_role=None, variable_manager=None, loader=None):
         assert isinstance(data, string_types) or isinstance(data, dict)

         ri = RoleInclude()
-        return ri.load_data(data, loader=loader)
+        return ri.load_data(data, variable_manager=variable_manager, loader=loader)
@@ -46,7 +46,7 @@ class RoleMetadata(Base):
         super(RoleMetadata, self).__init__()

     @staticmethod
-    def load(data, owner, loader=None):
+    def load(data, owner, variable_manager=None, loader=None):
         '''
         Returns a new RoleMetadata object based on the datastructure passed in.
         '''

@@ -54,7 +54,7 @@ class RoleMetadata(Base):
         if not isinstance(data, dict):
             raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())

-        m = RoleMetadata().load_data(data, loader=loader)
+        m = RoleMetadata().load_data(data, variable_manager=variable_manager, loader=loader)
         return m

     def _load_dependencies(self, attr, ds):

@@ -62,7 +62,7 @@ class RoleMetadata(Base):
         This is a helper loading function for the dependencies list,
         which returns a list of RoleInclude objects
         '''
-        return load_list_of_roles(ds, loader=self._loader)
+        return load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader)

     def _load_galaxy_info(self, attr, ds):
         '''

@@ -72,3 +72,13 @@ class RoleMetadata(Base):
         '''

         return ds
+
+    def serialize(self):
+        return dict(
+            allow_duplicates = self.allow_duplicates,
+            dependencies = self.dependencies,
+        )
+
+    def deserialize(self, data):
+        setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
+        setattr(self, 'dependencies', data.get('dependencies', []))
@@ -1,55 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from errors import AnsibleError
-from ansible.utils import list_union
-
-class Tag:
-    def __init__(self, tags=[]):
-        assert isinstance(tags, list)
-        self._tags = tags
-
-    def push(self, tags):
-        if not isinstance(tags, list):
-            tags = [ tags ]
-        for tag in tags:
-            if not isinstance(tag, basestring):
-                tag = str(tag)
-            if tag not in self._tags:
-                self._tags.append(tag)
-
-    def get_tags(self):
-        return self._tags
-
-    def merge(self, tags):
-        # returns a union of the tags, which can be a string,
-        # a list of strings, or another Tag() class
-        if isinstance(tags, basestring):
-            tags = Tag([tags])
-        elif isinstance(tags, list):
-            tags = Tag(tags)
-        elif not isinstance(tags, Tag):
-            raise AnsibleError('expected a Tag() instance, instead got %s' % type(tags))
-        return utils.list_union(self._tags, tags.get_tags())
-
-    def matches(self, tag):
-        return tag in self._tags
v2/ansible/playbook/taggable.py (new file, 46 lines)
@@ -0,0 +1,46 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.attribute import FieldAttribute
+
+class Taggable:
+    _tags = FieldAttribute(isa='list', default=[])
+
+    def __init__(self):
+        super(Taggable, self).__init__()
+
+    def get_tags(self):
+        return self._tags[:]
+
+    def evaluate_tags(self, only_tags, skip_tags):
+        my_tags = set(self.get_tags())  # get_tags() returns a list; intersection() needs a set
+
+        if skip_tags:
+            skipped_tags = my_tags.intersection(skip_tags)
+            if len(skipped_tags) > 0:
+                return False
+
+        matched_tags = my_tags.intersection(only_tags)
+        if len(matched_tags) > 0 or 'all' in only_tags:
+            return True
+        else:
+            return False
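
The evaluate_tags logic above reduces to plain set arithmetic: a skip-tag veto is checked first, then the match, and the special tag 'all' matches everything. A standalone sketch of the same decision (function and variable names here are illustrative only):

    def evaluate_tags(my_tags, only_tags, skip_tags):
        # mirrors Taggable.evaluate_tags: skip_tags veto first, then only_tags
        my_tags = set(my_tags)
        if skip_tags and my_tags.intersection(skip_tags):
            return False
        return bool(my_tags.intersection(only_tags)) or 'all' in only_tags

    assert evaluate_tags(['setup'], only_tags=['all'], skip_tags=[]) is True
    assert evaluate_tags(['setup'], only_tags=['all'], skip_tags=['setup']) is False
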
@@ -19,18 +19,22 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
-from ansible.playbook.base import Base
-from ansible.playbook.attribute import Attribute, FieldAttribute
-
 from ansible.errors import AnsibleError
-
-from ansible.parsing.splitter import parse_kv
 from ansible.parsing.mod_args import ModuleArgsParser
-from ansible.parsing.yaml import DataLoader
+from ansible.parsing.splitter import parse_kv
 from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
-from ansible.plugins import module_finder, lookup_finder
-
-class Task(Base):
+from ansible.plugins import module_loader, lookup_loader
+
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.block import Block
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+
+class Task(Base, Conditional, Taggable):
 
     """
     A task is a language feature that represents a call to a module, with given arguments and other parameters.
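
Making Task inherit from Base, Conditional and Taggable is ordinary cooperative mixin layering: every __init__ calls super(), so one constructor chain initializes all three facets (the module-level `__metaclass__ = type` makes these classes new-style, which is what makes super() work here). A reduced sketch of the pattern, with hypothetical attribute names, trimmed to the wiring:

    class Base(object):
        def __init__(self):
            super(Base, self).__init__()
            self._data = {}

    class Conditional(object):
        def __init__(self):
            super(Conditional, self).__init__()
            self.when = []

    class Taggable(object):
        def __init__(self):
            super(Taggable, self).__init__()
            self.tags = []

    class Task(Base, Conditional, Taggable):
        def __init__(self):
            # MRO runs Base -> Conditional -> Taggable -> object
            super(Task, self).__init__()

    t = Task()
    assert t.when == [] and t.tags == []
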
@@ -54,10 +58,10 @@ class Task(Base):
 
     _always_run = FieldAttribute(isa='bool')
     _any_errors_fatal = FieldAttribute(isa='bool')
-    _async = FieldAttribute(isa='int')
+    _async = FieldAttribute(isa='int', default=0)
     _changed_when = FieldAttribute(isa='string')
     _connection = FieldAttribute(isa='string')
-    _delay = FieldAttribute(isa='int')
+    _delay = FieldAttribute(isa='int', default=0)
     _delegate_to = FieldAttribute(isa='string')
     _environment = FieldAttribute(isa='dict')
     _failed_when = FieldAttribute(isa='string')
@@ -75,10 +79,10 @@ class Task(Base):
 
     _no_log = FieldAttribute(isa='bool')
     _notify = FieldAttribute(isa='list')
-    _poll = FieldAttribute(isa='integer')
+    _poll = FieldAttribute(isa='int')
     _register = FieldAttribute(isa='string')
     _remote_user = FieldAttribute(isa='string')
-    _retries = FieldAttribute(isa='integer')
+    _retries = FieldAttribute(isa='int', default=1)
     _run_once = FieldAttribute(isa='bool')
     _su = FieldAttribute(isa='bool')
     _su_pass = FieldAttribute(isa='string')
@@ -86,10 +90,8 @@ class Task(Base):
     _sudo = FieldAttribute(isa='bool')
     _sudo_user = FieldAttribute(isa='string')
     _sudo_pass = FieldAttribute(isa='string')
-    _tags = FieldAttribute(isa='list', default=[])
     _transport = FieldAttribute(isa='string')
     _until = FieldAttribute(isa='list') # ?
-    _when = FieldAttribute(isa='list', default=[])
 
     def __init__(self, block=None, role=None, task_include=None):
         ''' constructs a task, without the Task.load classmethod, it will be pretty blank '''
@@ -104,11 +106,14 @@ class Task(Base):
         ''' return the name of the task '''
 
         if self._role and self.name:
-            return "%s : %s" % (self._role.name, self.name)
+            return "%s : %s" % (self._role.get_name(), self.name)
         elif self.name:
             return self.name
         else:
             flattened_args = self._merge_kv(self.args)
-            return "%s %s" % (self.action, flattened_args)
+            if self._role:
+                return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
+            else:
+                return "%s %s" % (self.action, flattened_args)
 
     def _merge_kv(self, ds):
@@ -126,9 +131,9 @@ class Task(Base):
         return buf
 
     @staticmethod
-    def load(data, block=None, role=None, task_include=None, loader=None):
+    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
         t = Task(block=block, role=role, task_include=task_include)
-        return t.load_data(data, loader=loader)
+        return t.load_data(data, variable_manager=variable_manager, loader=loader)
 
     def __repr__(self):
         ''' returns a human readable representation of the task '''
@@ -173,13 +178,32 @@ class Task(Base):
                 # we don't want to re-assign these values, which were
                 # determined by the ModuleArgsParser() above
                 continue
-            elif k.replace("with_", "") in lookup_finder:
+            elif k.replace("with_", "") in lookup_loader:
                 self._munge_loop(ds, new_ds, k, v)
             else:
                 new_ds[k] = v
 
         return new_ds
 
+    def get_vars(self):
+        return self.serialize()
+
+    def get_tags(self):
+        tags = set(self.tags[:])
+        if self._block:
+            tags.update(self._block.get_tags())
+        if self._role:
+            tags.update(self._role.get_tags())
+        return tags
+
+    #def get_conditionals(self):
+    #    conditionals = set(self.when[:])
+    #    if self._block:
+    #        conditionals.update(self._block.get_conditionals())
+    #    if self._role:
+    #        conditionals.update(self._role.get_conditionals())
+    #    return conditionals
+
     def compile(self):
         '''
         For tasks, this is just a dummy method returning an array
@@ -188,3 +212,65 @@ class Task(Base):
         '''
 
         return [self]
+
+    def copy(self):
+        new_me = super(Task, self).copy()
+
+        new_me._block = None
+        if self._block:
+            new_me._block = self._block.copy()
+
+        new_me._role = None
+        if self._role:
+            new_me._role = self._role
+
+        new_me._task_include = None
+        if self._task_include:
+            new_me._task_include = self._task_include.copy()
+
+        return new_me
+
+    def serialize(self):
+        data = super(Task, self).serialize()
+
+        if self._block:
+            data['block'] = self._block.serialize()
+
+        if self._role:
+            data['role'] = self._role.serialize()
+
+        return data
+
+    def deserialize(self, data):
+        block_data = data.get('block')
+        if block_data:
+            b = Block()
+            b.deserialize(block_data)
+            self._block = b
+            del data['block']
+
+        role_data = data.get('role')
+        if role_data:
+            r = Role()
+            r.deserialize(role_data)
+            self._role = r
+            del data['role']
+
+        super(Task, self).deserialize(data)
+
+    def evaluate_conditional(self, all_vars):
+        if self._block is not None:
+            if not self._block.evaluate_conditional(all_vars):
+                return False
+        return super(Task, self).evaluate_conditional(all_vars)
+
+    def post_validate(self, all_vars=dict(), ignore_undefined=False):
+        '''
+        '''
+
+        if self._block:
+            self._block.post_validate(all_vars=all_vars, ignore_undefined=ignore_undefined)
+        if self._role:
+            self._role.post_validate(all_vars=all_vars, ignore_undefined=ignore_undefined)
+
+        super(Task, self).post_validate(all_vars=all_vars, ignore_undefined=ignore_undefined)
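
Task.deserialize above shows the convention used throughout these classes for nested objects: the child's dict is popped out of the data, rebuilt into a real object, and the remainder is handed to the parent class. A reduced sketch of that pattern (a hypothetical Node class, not the real API):

    class Node(object):
        def __init__(self):
            self._parent = None
            self.name = None

        def serialize(self):
            data = dict(name=self.name)
            if self._parent:
                data['parent'] = self._parent.serialize()
            return data

        def deserialize(self, data):
            parent_data = data.get('parent')
            if parent_data:
                p = Node()
                p.deserialize(parent_data)   # rebuild the nested object first
                self._parent = p
                del data['parent']
            self.name = data.get('name')

    n = Node(); n.name = 'child'
    p = Node(); p.name = 'parent'
    n._parent = p
    copy = Node()
    copy.deserialize(n.serialize())
    assert copy._parent.name == 'parent'
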
@@ -25,7 +25,7 @@ from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
 from ansible.playbook.attribute import Attribute, FieldAttribute
 from ansible.playbook.base import Base
 from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
-from ansible.plugins import lookup_finder
+from ansible.plugins import lookup_loader
 
 
 __all__ = ['TaskInclude']
@@ -66,9 +66,9 @@ class TaskInclude(Base):
         super(TaskInclude, self).__init__()
 
     @staticmethod
-    def load(data, block=None, role=None, task_include=None, loader=None):
+    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
         ti = TaskInclude(block=block, role=role, task_include=None)
-        return ti.load_data(data, loader=loader)
+        return ti.load_data(data, variable_manager=variable_manager, loader=loader)
 
     def munge(self, ds):
         '''
@@ -87,7 +87,7 @@ class TaskInclude(Base):
         for (k,v) in ds.iteritems():
             if k == 'include':
                 self._munge_include(ds, new_ds, k, v)
-            elif k.replace("with_", "") in lookup_finder:
+            elif k.replace("with_", "") in lookup_loader:
                 self._munge_loop(ds, new_ds, k, v)
             else:
                 # some basic error checking, to make sure vars are properly
@@ -148,6 +148,7 @@ class TaskInclude(Base):
             parent_block=self._block,
             task_include=self,
             role=self._role,
+            variable_manager=self._variable_manager,
             loader=self._loader
         )
         return ds
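
The `k.replace("with_", "") in lookup_loader` test above is how a loop keyword is recognized: strip the `with_` prefix and ask the lookup loader whether a plugin by that name exists. A standalone sketch of the same dispatch, with a plain set standing in for the real loader:

    known_lookups = {'items', 'fileglob', 'sequence'}   # stand-in for lookup_loader

    def classify_key(k):
        if k == 'include':
            return 'include'
        elif k.replace("with_", "") in known_lookups:
            return 'loop'        # e.g. with_items -> the 'items' lookup plugin
        else:
            return 'plain'

    assert classify_key('with_items') == 'loop'
    assert classify_key('include') == 'include'
    assert classify_key('vars') == 'plain'
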
@@ -240,7 +240,7 @@ callback_loader = PluginLoader(
 
 connection_loader = PluginLoader(
     'Connection',
-    'ansible.plugins.connection',
+    'ansible.plugins.connections',
     C.DEFAULT_CONNECTION_PLUGIN_PATH,
     'connection_plugins',
     aliases={'paramiko': 'paramiko_ssh'}
@@ -253,37 +253,44 @@ shell_loader = PluginLoader(
     'shell_plugins',
 )
 
-module_finder = PluginLoader(
+module_loader = PluginLoader(
     '',
     'ansible.modules',
     C.DEFAULT_MODULE_PATH,
     'library'
 )
 
-lookup_finder = PluginLoader(
+lookup_loader = PluginLoader(
     'LookupModule',
     'ansible.plugins.lookup',
     C.DEFAULT_LOOKUP_PLUGIN_PATH,
     'lookup_plugins'
 )
 
-vars_finder = PluginLoader(
+vars_loader = PluginLoader(
     'VarsModule',
     'ansible.plugins.vars',
     C.DEFAULT_VARS_PLUGIN_PATH,
     'vars_plugins'
 )
 
-filter_finder = PluginLoader(
+filter_loader = PluginLoader(
     'FilterModule',
     'ansible.plugins.filter',
     C.DEFAULT_FILTER_PLUGIN_PATH,
     'filter_plugins'
 )
 
-fragment_finder = PluginLoader(
+fragment_loader = PluginLoader(
     'ModuleDocFragment',
     'ansible.utils.module_docs_fragments',
     os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
     '',
 )
+
+strategy_loader = PluginLoader(
+    'StrategyModule',
+    'ansible.plugins.strategies',
+    None,
+    'strategy_plugins',
+)
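
With the rename from *_finder to *_loader the call sites read naturally: each loader is asked for a plugin by name, or, for modules, for a file path. A usage sketch limited to the calls visible elsewhere in this commit (membership tests in Task.munge, find_plugin and get in the action code below); anything beyond those calls would be an assumption:

    from ansible.plugins import lookup_loader, module_loader, shell_loader

    # membership test, as Task.munge does for with_* keywords
    is_loop_keyword = 'items' in lookup_loader

    # resolve a module name to a path on disk, as ActionBase._configure_module does
    path = module_loader.find_plugin('ping', None)

    # fetch a plugin instance by name, as ActionBase.get_shell does
    sh = shell_loader.get('sh')
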
@@ -19,3 +19,410 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
 
+import StringIO
+import json
+import os
+import random
+import sys   # used by _remote_checksum error reporting below
+import tempfile
+import time
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.executor.module_common import ModuleReplacer
+from ansible.parsing.utils.jsonify import jsonify
+from ansible.plugins import module_loader, shell_loader
+
+from ansible.utils.debug import debug
+
+class ActionBase:
+
+    '''
+    This class is the base class for all action plugins, and defines
+    code common to all actions. The base class handles the connection
+    by putting/getting files and executing commands based on the current
+    action in use.
+    '''
+
+    def __init__(self, task, connection, connection_info, loader):
+        self._task            = task
+        self._connection      = connection
+        self._connection_info = connection_info
+        self._loader          = loader
+        self._shell           = self.get_shell()
+
+    def get_shell(self):
+
+        # FIXME: no more inject, get this from the host variables?
+        #default_shell = getattr(self._connection, 'default_shell', '')
+        #shell_type = inject.get('ansible_shell_type')
+        #if not shell_type:
+        #    if default_shell:
+        #        shell_type = default_shell
+        #    else:
+        #        shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
+
+        shell_type = getattr(self._connection, 'default_shell', '')
+        if not shell_type:
+            shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
+
+        shell_plugin = shell_loader.get(shell_type)
+        if shell_plugin is None:
+            shell_plugin = shell_loader.get('sh')
+
+        return shell_plugin
+
+    def _configure_module(self, module_name, module_args):
+        '''
+        Handles the loading and templating of the module code through the
+        ModuleReplacer class.
+        '''
+
+        # Search module path(s) for named module.
+        module_suffixes = getattr(self._connection, 'default_suffixes', None)
+        module_path = module_loader.find_plugin(module_name, module_suffixes, transport=self._connection.get_transport())
+        if module_path is None:
+            module_path2 = module_loader.find_plugin('ping', module_suffixes)
+            if module_path2 is not None:
+                raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
+            else:
+                raise AnsibleError("The module %s was not found in configured module paths. " \
+                    "Additionally, core modules are missing. If this is a checkout, " \
+                    "run 'git submodule update --init --recursive' to correct this problem." % (module_name))
+
+        # insert shared code and arguments into the module
+        (module_data, module_style, module_shebang) = ModuleReplacer().modify_module(module_path, module_args)
+
+        return (module_style, module_shebang, module_data)
+
+    def _compute_environment_string(self):
+        '''
+        Builds the environment string to be used when executing the remote task.
+        '''
+
+        enviro = {}
+
+        # FIXME: not sure where this comes from, probably task but maybe also the play?
+        #if self.environment:
+        #    enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
+        #    enviro = utils.safe_eval(enviro)
+        #    if type(enviro) != dict:
+        #        raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
+
+        return self._shell.env_prefix(**enviro)
+
+    def _early_needs_tmp_path(self):
+        '''
+        Determines if a temp path should be created before the action is executed.
+        '''
+
+        # FIXME: modified from original, needs testing? Since this is now inside
+        #        the action plugin, it should make it just this simple
+        return getattr(self, 'TRANSFERS_FILES', False)
+
+    def _late_needs_tmp_path(self, tmp, module_style):
+        '''
+        Determines if a temp path is required after some early actions have already taken place.
+        '''
+        if tmp and "tmp" in tmp:
+            # tmp has already been created
+            return False
+        if not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.su:
+            # tmp is necessary to store module source code
+            return True
+        if not self._connection._has_pipelining:
+            # tmp is necessary to store the module source code
+            # or we want to keep the files on the target system
+            return True
+        if module_style != "new":
+            # even when conn has pipelining, old style modules need tmp to store arguments
+            return True
+        return False
+
+    # FIXME: return a datastructure in this function instead of raising errors -
+    #        the new executor pipeline handles it much better that way
+    def _make_tmp_path(self):
+        '''
+        Create and return a temporary path on a remote box.
+        '''
+
+        basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
+        use_system_tmp = False
+
+        if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root'):
+            use_system_tmp = True
+
+        tmp_mode = None
+        if self._connection_info.remote_user != 'root' or \
+            ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root')):
+            tmp_mode = 'a+rx'
+
+        cmd = self._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
+        result = self._low_level_execute_command(cmd, None, sudoable=False)
+
+        # error handling on this seems a little aggressive?
+        if result['rc'] != 0:
+            if result['rc'] == 5:
+                output = 'Authentication failure.'
+            elif result['rc'] == 255 and self._connection.get_transport() in ['ssh']:
+                # FIXME: more utils.VERBOSITY
+                #if utils.VERBOSITY > 3:
+                #    output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
+                #else:
+                #    output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
+                output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
+            elif 'No space left on device' in result['stderr']:
+                output = result['stderr']
+            else:
+                output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
+            if 'stdout' in result and result['stdout'] != '':
+                output = output + ": %s" % result['stdout']
+            raise AnsibleError(output)
+
+        # FIXME: do we still need to do this?
+        #rc = self._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
+        rc = self._shell.join_path(result['stdout'].strip().splitlines()[-1], '')
+
+        # Catch failure conditions, files should never be
+        # written to locations in /.
+        if rc == '/':
+            raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
+
+        return rc
+
+    def _remove_tmp_path(self, tmp_path):
+        '''Remove a temporary path we created. '''
+
+        if "-tmp-" in tmp_path:
+            cmd = self._shell.remove(tmp_path, recurse=True)
+            # If we have gotten here we have a working ssh configuration.
+            # If ssh breaks we could leave tmp directories out on the remote system.
+            self._low_level_execute_command(cmd, None, sudoable=False)
+
+    def _transfer_data(self, remote_path, data):
+        '''
+        Copies the module data out to the temporary module path.
+        '''
+
+        if type(data) == dict:
+            data = jsonify(data)
+
+        afd, afile = tempfile.mkstemp()
+        afo = os.fdopen(afd, 'w')
+        try:
+            if not isinstance(data, unicode):
+                #ensure the data is valid UTF-8
+                data = data.decode('utf-8')
+            else:
+                data = data.encode('utf-8')
+            afo.write(data)
+        except Exception, e:
+            raise AnsibleError("failure encoding into utf-8: %s" % str(e))
+
+        afo.flush()
+        afo.close()
+
+        try:
+            self._connection.put_file(afile, remote_path)
+        finally:
+            os.unlink(afile)
+
+        return remote_path
+
+    def _remote_chmod(self, tmp, mode, path, sudoable=False):
+        '''
+        Issue a remote chmod command
+        '''
+
+        cmd = self._shell.chmod(mode, path)
+        return self._low_level_execute_command(cmd, tmp, sudoable=sudoable)
+
+    def _remote_checksum(self, tmp, path):
+        '''
+        Takes a remote checksum and returns 1 if no file
+        '''
+
+        # FIXME: figure out how this will work, probably pulled from the
+        #        variable manager data
+        #python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python')
+        python_interp = 'python'
+        cmd = self._shell.checksum(path, python_interp)
+        data = self._low_level_execute_command(cmd, tmp, sudoable=True)
+        # FIXME: implement this function?
+        #data2 = utils.last_non_blank_line(data['stdout'])
+        data2 = data['stdout'].strip().splitlines()[-1]
+        try:
+            if data2 == '':
+                # this may happen if the connection to the remote server
+                # failed, so just return "INVALIDCHECKSUM" to avoid errors
+                return "INVALIDCHECKSUM"
+            else:
+                return data2.split()[0]
+        except IndexError:
+            sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
+            sys.stderr.write("command: %s\n" % cmd)
+            sys.stderr.write("----\n")
+            sys.stderr.write("output: %s\n" % data)
+            sys.stderr.write("----\n")
+            # this will signal that it changed and allow things to keep going
+            return "INVALIDCHECKSUM"
+
+    def _remote_expand_user(self, path, tmp):
+        ''' takes a remote path and performs tilde expansion on the remote host '''
+        if not path.startswith('~'):
+            return path
+
+        split_path = path.split(os.path.sep, 1)
+        expand_path = split_path[0]
+        if expand_path == '~':
+            if self._connection_info.sudo and self._connection_info.sudo_user:
+                expand_path = '~%s' % self._connection_info.sudo_user
+            elif self._connection_info.su and self._connection_info.su_user:
+                expand_path = '~%s' % self._connection_info.su_user
+
+        cmd = self._shell.expand_user(expand_path)
+        data = self._low_level_execute_command(cmd, tmp, sudoable=False)
+        #initial_fragment = utils.last_non_blank_line(data['stdout'])
+        initial_fragment = data['stdout'].strip().splitlines()[-1]
+
+        if not initial_fragment:
+            # Something went wrong trying to expand the path remotely. Return
+            # the original string
+            return path
+
+        if len(split_path) > 1:
+            return self._shell.join_path(initial_fragment, *split_path[1:])
+        else:
+            return initial_fragment
+
+    def _filter_leading_non_json_lines(self, data):
+        '''
+        Used to avoid random output from SSH at the top of JSON output, like messages from
+        tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+        need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
+        filter only leading lines since multiline JSON is valid.
+        '''
+
+        filtered_lines = StringIO.StringIO()
+        stop_filtering = False
+        for line in data.splitlines():
+            if stop_filtering or line.startswith('{') or line.startswith('['):
+                stop_filtering = True
+                filtered_lines.write(line + '\n')
+        return filtered_lines.getvalue()
+
+    def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_files=False, delete_remote_tmp=True):
+        '''
+        Transfer and run a module along with its arguments.
+        '''
+
+        # if a module name was not specified for this execution, use
+        # the action from the task
+        if module_name is None:
+            module_name = self._task.action
+        if module_args is None:
+            module_args = self._task.args
+
+        debug("in _execute_module (%s, %s)" % (module_name, module_args))
+
+        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args)
+        if not shebang:
+            raise AnsibleError("module is missing interpreter line")
+
+        # a remote tmp path may be necessary and not already created
+        remote_module_path = None
+        if self._late_needs_tmp_path(tmp, module_style):
+            tmp = self._make_tmp_path()
+            remote_module_path = self._shell.join_path(tmp, module_name)
+
+        # FIXME: async stuff here
+        #if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES):
+        if remote_module_path:
+            self._transfer_data(remote_module_path, module_data)
+
+        environment_string = self._compute_environment_string()
+
+        if tmp and "tmp" in tmp and ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root')):
+            # deal with possible umask issues once sudo'ed to other user
+            self._remote_chmod(tmp, 'a+r', remote_module_path)
+
+        cmd = ""
+        in_data = None
+
+        # FIXME: all of the old-module style and async stuff has been removed from here, and
+        #        might need to be re-added (unless we decide to drop support for old-style modules
+        #        at this point and rework things to support non-python modules specifically)
+        if self._connection._has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
+            in_data = module_data
+        else:
+            if remote_module_path:
+                cmd = remote_module_path
+
+        rm_tmp = None
+        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
+            if not self._connection_info.sudo or self._connection_info.su or self._connection_info.sudo_user == 'root' or self._connection_info.su_user == 'root':
+                # not sudoing or sudoing to root, so can cleanup files in the same step
+                rm_tmp = tmp
+
+        cmd = self._shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
+        cmd = cmd.strip()
+
+        sudoable = True
+        if module_name == "accelerate":
+            # always run the accelerate module as the user
+            # specified in the play, not the sudo_user
+            sudoable = False
+
+        res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable, in_data=in_data)
+
+        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
+            if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root'):
+                # not sudoing to root, so maybe can't delete files as that other user
+                # have to clean up temp files as original user in a second step
+                cmd2 = self._shell.remove(tmp, recurse=True)
+                self._low_level_execute_command(cmd2, tmp, sudoable=False)
+
+        # FIXME: in error situations, the stdout may not contain valid data, so we
+        #        should check for bad rc codes better to catch this here
+        data = json.loads(self._filter_leading_non_json_lines(res['stdout']))
+        if 'parsed' in data and data['parsed'] == False:
+            data['msg'] += res['stderr']
+
+        debug("done with _execute_module (%s, %s)" % (module_name, module_args))
+        return data
+
+    def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=False, in_data=None):
+        '''
+        This is the function which executes the low level shell command, which
+        may be commands to create/remove directories for temporary files, or to
+        run the module code or python directly when pipelining.
+        '''
+
+        debug("in _low_level_execute_command() (%s)" % (cmd,))
+        if not cmd:
+            # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
+            debug("no command, exiting _low_level_execute_command()")
+            return dict(stdout='', stderr='')
+
+        if executable is None:
+            executable = C.DEFAULT_EXECUTABLE
+
+        debug("executing the command through the connection")
+        rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data, sudoable=sudoable)
+        debug("command execution done")
+
+        if not isinstance(stdout, basestring):
+            out = ''.join(stdout.readlines())
+        else:
+            out = stdout
+
+        if not isinstance(stderr, basestring):
+            err = ''.join(stderr.readlines())
+        else:
+            err = stderr
+
+        debug("done with _low_level_execute_command() (%s)" % (cmd,))
+        if rc is not None:
+            return dict(rc=rc, stdout=out, stderr=err)
+        else:
+            return dict(stdout=out, stderr=err)
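
Of the helpers above, _filter_leading_non_json_lines is the easiest to sanity-check in isolation: everything before the first line opening with '{' or '[' is dropped, and everything from that line on is kept verbatim. A quick illustration of the behavior as written, outside the class:

    import StringIO

    noisy = "MOTD from dropbear\ntcgetattr: warning\n{\n  \"changed\": false\n}\n"

    out = StringIO.StringIO()
    keep = False
    for line in noisy.splitlines():
        if keep or line.startswith('{') or line.startswith('['):
            keep = True
            out.write(line + '\n')

    assert out.getvalue() == "{\n  \"changed\": false\n}\n"
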
v2/ansible/plugins/action/assemble.py (new file, 159 lines)
@@ -0,0 +1,159 @@
+# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#     Stephen Fromm <sfromm@gmail.com>
+#     Brian Coca <briancoca+dev@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import os.path
+import pipes
+import shutil
+import tempfile
+import base64
+import re
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum_s
+
+class ActionModule(ActionBase):
+
+    TRANSFERS_FILES = True
+
+    def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
+        ''' assemble a file from a directory of fragments '''
+
+        tmpfd, temp_path = tempfile.mkstemp()
+        tmp = os.fdopen(tmpfd,'w')
+        delimit_me = False
+        add_newline = False
+
+        for f in sorted(os.listdir(src_path)):
+            if compiled_regexp and not compiled_regexp.search(f):
+                continue
+            fragment = "%s/%s" % (src_path, f)
+            if not os.path.isfile(fragment):
+                continue
+            fragment_content = file(fragment).read()
+
+            # always put a newline between fragments if the previous fragment didn't end with a newline.
+            if add_newline:
+                tmp.write('\n')
+
+            # delimiters should only appear between fragments
+            if delimit_me:
+                if delimiter:
+                    # un-escape anything like newlines
+                    delimiter = delimiter.decode('unicode-escape')
+                    tmp.write(delimiter)
+                    # always make sure there's a newline after the
+                    # delimiter, so lines don't run together
+                    if delimiter[-1] != '\n':
+                        tmp.write('\n')
+
+            tmp.write(fragment_content)
+            delimit_me = True
+            if fragment_content.endswith('\n'):
+                add_newline = False
+            else:
+                add_newline = True
+
+        tmp.close()
+        return temp_path
+
+    def run(self, tmp=None, task_vars=dict()):
+
+        src = self._task.args.get('src', None)
+        dest = self._task.args.get('dest', None)
+        delimiter = self._task.args.get('delimiter', None)
+        # FIXME: boolean needs to be moved out of utils
+        #remote_src = utils.boolean(options.get('remote_src', 'yes'))
+        remote_src = self._task.args.get('remote_src', 'yes')
+        regexp = self._task.args.get('regexp', None)
+
+        if src is None or dest is None:
+            return dict(failed=True, msg="src and dest are required")
+
+        # FIXME: this should be boolean, hard-coded to yes for testing
+        if remote_src == 'yes':
+            return self._execute_module(tmp=tmp)
+        # FIXME: we don't do inject anymore, so not sure where the original
+        #        file stuff is going to end up at this time
+        #elif '_original_file' in inject:
+        #    src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
+        else:
+            # the source is local, so expand it here
+            src = os.path.expanduser(src)
+
+        _re = None
+        if regexp is not None:
+            _re = re.compile(regexp)
+
+        # Does all work assembling the file
+        path = self._assemble_from_fragments(src, delimiter, _re)
+
+        path_checksum = checksum_s(path)
+        dest = self._remote_expand_user(dest, tmp)
+        remote_checksum = self._remote_checksum(tmp, dest)
+
+        if path_checksum != remote_checksum:
+            resultant = file(path).read()
+            # FIXME: diff needs to be moved somewhere else
+            #if self.runner.diff:
+            #    dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), tmp=tmp, persist_files=True)
+            #    if 'content' in dest_result:
+            #        dest_contents = dest_result['content']
+            #        if dest_result['encoding'] == 'base64':
+            #            dest_contents = base64.b64decode(dest_contents)
+            #        else:
+            #            raise Exception("unknown encoding, failed: %s" % dest_result)
+            xfered = self._transfer_data('src', resultant)
+
+            # fix file permissions when the copy is done as a different user
+            if self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root':
+                # argument order matches _remote_chmod(tmp, mode, path)
+                self._remote_chmod(tmp, 'a+r', xfered)
+
+            # run the copy module
+            new_module_args = self._task.args.copy()
+            new_module_args.update(
+                dict(
+                    src=xfered,
+                    dest=dest,
+                    original_basename=os.path.basename(src),
+                )
+            )
+
+            # FIXME: checkmode stuff
+            #if self.runner.noop_on_check(inject):
+            #    return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
+            #else:
+            #    res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
+            #    res.diff = dict(after=resultant)
+            #    return res
+            res = self._execute_module(module_name='copy', module_args=new_module_args, tmp=tmp)
+            #res.diff = dict(after=resultant)
+            return res
+        else:
+            new_module_args = self._task.args.copy()
+            new_module_args.update(
+                dict(
+                    # FIXME: 'xfered' is only defined in the branch above; nothing
+                    #        was transferred on this path
+                    src=xfered,
+                    dest=dest,
+                    original_basename=os.path.basename(src),
+                )
+            )
+
+            return self._execute_module(module_name='file', module_args=new_module_args, tmp=tmp)
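
The delimit_me/add_newline flags above implement a small state machine: a delimiter is only ever written between two fragments, and a newline is injected only when the previous fragment did not supply one. The same logic over in-memory strings (an illustrative helper, not part of the module):

    def assemble(fragments, delimiter=None):
        # mirrors _assemble_from_fragments, but over strings instead of files
        out = []
        delimit_me = False
        add_newline = False
        for frag in fragments:
            if add_newline:
                out.append('\n')
            if delimit_me and delimiter:
                out.append(delimiter)
                if delimiter[-1] != '\n':
                    out.append('\n')
            out.append(frag)
            delimit_me = True
            add_newline = not frag.endswith('\n')
        return ''.join(out)

    assert assemble(['a\n', 'b']) == 'a\nb'
    assert assemble(['a', 'b'], delimiter='#') == 'a\n#\nb'
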
v2/ansible/plugins/action/assert.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+# Copyright 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+
+class ActionModule(ActionBase):
+    ''' Fail with custom message '''
+
+    TRANSFERS_FILES = False
+
+    def run(self, tmp=None, task_vars=dict()):
+
+        # note: the fail module does not need to pay attention to check mode
+        # it always runs.
+
+        msg = None
+        if 'msg' in self._task.args:
+            msg = self._task.args['msg']
+
+        if not 'that' in self._task.args:
+            raise AnsibleError('conditional required in "that" string')
+
+        for that in self._task.args['that']:
+            self._task.when = [ that ]
+            test_result = self._task.evaluate_conditional(all_vars=task_vars)
+            if not test_result:
+                result = dict(
+                    failed       = True,
+                    evaluated_to = test_result,
+                    assertion    = that,
+                )
+
+                if msg:
+                    result['msg'] = msg
+
+                return result
+
+        return dict(changed=False, msg='all assertions passed')
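
The run() loop above rewrites the task's `when` clause once per assertion and returns on the first failure, which is why only the first failed assertion is ever reported. The control flow, reduced to a standalone sketch (the check function is a hypothetical stand-in for Task.evaluate_conditional):

    def run_assertions(assertions, check, msg=None):
        # check() stands in for self._task.evaluate_conditional(all_vars=task_vars)
        for that in assertions:
            if not check(that):
                result = dict(failed=True, evaluated_to=False, assertion=that)
                if msg:
                    result['msg'] = msg
                return result   # first failure wins; later assertions never run
        return dict(changed=False, msg='all assertions passed')

    assert run_assertions(['1 == 1', '2 == 3'], check=lambda c: eval(c))['failed'] is True
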
v2/ansible/plugins/action/copy.py (new file, 384 lines; listing truncated below)
@@ -0,0 +1,384 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import pipes
+import stat
+import tempfile
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+from ansible.utils.hashing import checksum
+
+## fixes https://github.com/ansible/ansible/issues/3518
+# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
+
+import sys
+reload(sys)
+sys.setdefaultencoding("utf8")
+
+
+class ActionModule(ActionBase):
+
+    def run(self, tmp=None, task_vars=dict()):
+        ''' handler for file transfer operations '''
+
+        source  = self._task.args.get('src', None)
+        content = self._task.args.get('content', None)
+        dest    = self._task.args.get('dest', None)
+        raw     = boolean(self._task.args.get('raw', 'no'))
+        force   = boolean(self._task.args.get('force', 'yes'))
+
+        # content with newlines is going to be escaped to safely load in yaml
+        # now we need to unescape it so that the newlines are evaluated properly
+        # when writing the file to disk
+        if content:
+            if isinstance(content, unicode):
+                try:
+                    content = content.decode('unicode-escape')
+                except UnicodeDecodeError:
+                    pass
+
+        # FIXME: first available file needs to be reworked somehow...
+        #if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
+        #    result=dict(failed=True, msg="src (or content) and dest are required")
+        #    return ReturnData(conn=conn, result=result)
+        #elif (source is not None or 'first_available_file' in inject) and content is not None:
+        #    result=dict(failed=True, msg="src and content are mutually exclusive")
+        #    return ReturnData(conn=conn, result=result)
+
+        # Check if the source ends with a "/"
+        source_trailing_slash = False
+        if source:
+            source_trailing_slash = source.endswith("/")
+
+        # Define content_tempfile in case we set it after finding content populated.
+        content_tempfile = None
+
+        # If content is defined make a temp file and write the content into it.
+        if content is not None:
+            try:
+                # If content comes to us as a dict it should be decoded json.
+                # We need to encode it back into a string to write it out.
+                if type(content) is dict:
+                    content_tempfile = self._create_content_tempfile(json.dumps(content))
+                else:
+                    content_tempfile = self._create_content_tempfile(content)
+                source = content_tempfile
+            except Exception, err:
+                # v2 actions return plain dicts rather than v1's ReturnData
+                return dict(failed=True, msg="could not write content temp file: %s" % err)
+        ###############################################################################################
+        # FIXME: first_available_file needs to be reworked?
+        ###############################################################################################
+        # if we have first_available_file in our vars
+        # look up the files and use the first one we find as src
+        #elif 'first_available_file' in inject:
+        #    found = False
+        #    for fn in inject.get('first_available_file'):
+        #        fn_orig = fn
+        #        fnt = template.template(self.runner.basedir, fn, inject)
+        #        fnd = utils.path_dwim(self.runner.basedir, fnt)
+        #        if not os.path.exists(fnd) and '_original_file' in inject:
+        #            fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
+        #        if os.path.exists(fnd):
+        #            source = fnd
+        #            found = True
+        #            break
+        #    if not found:
+        #        results = dict(failed=True, msg="could not find src in first_available_file list")
+        #        return ReturnData(conn=conn, result=results)
+        ###############################################################################################
+        else:
+            # FIXME: templating needs to be worked out still
+            #source = template.template(self.runner.basedir, source, inject)
+            # FIXME: original_file stuff needs to be reworked - most likely
+            #        simply checking to see if the task has a role and using
+            #        the role path as the dwim target and basedir would work
+            #if '_original_file' in inject:
+            #    source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
+            #else:
+            #    source = utils.path_dwim(self.runner.basedir, source)
+            source = self._loader.path_dwim(source)
+
+        # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
+        source_files = []
+
+        # If source is a directory populate our list else source is a file and translate it to a tuple.
+        if os.path.isdir(source):
+            # Get the amount of spaces to remove to get the relative path.
+            if source_trailing_slash:
+                sz = len(source) + 1
+            else:
+                sz = len(source.rsplit('/', 1)[0]) + 1
+
+            # Walk the directory and append the file tuples to source_files.
+            for base_path, sub_folders, files in os.walk(source):
+                for file in files:
+                    full_path = os.path.join(base_path, file)
+                    rel_path = full_path[sz:]
+                    source_files.append((full_path, rel_path))
+
+            # If it's recursive copy, destination is always a dir,
+            # explicitly mark it so (note - copy module relies on this).
+            if not self._shell.path_has_trailing_slash(dest):
+                dest = self._shell.join_path(dest, '')
+        else:
+            source_files.append((source, os.path.basename(source)))
+
+        changed = False
+        diffs = []
+        module_result = {"changed": False}
+
+        # A register for if we executed a module.
+        # Used to cut down on command calls when not recursive.
+        module_executed = False
+
+        # Tell _execute_module to delete the file if there is one file.
+        delete_remote_tmp = (len(source_files) == 1)
+
+        # If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
+        if not delete_remote_tmp:
+            if tmp is None or "-tmp-" not in tmp:
+                tmp = self._make_tmp_path()
+
+        # expand any user home dir specifier
+        dest = self._remote_expand_user(dest, tmp)
+
+        for source_full, source_rel in source_files:
+            # Generate a hash of the local file.
+            local_checksum = checksum(source_full)
+
+            # If local_checksum is not defined we can't find the file so we should fail out.
+            if local_checksum is None:
+                return dict(failed=True, msg="could not find src=%s" % source_full)
+
+            # This is kind of optimization - if user told us destination is
+            # dir, do path manipulation right away, otherwise we still check
+            # for dest being a dir via remote call below.
+            if self._shell.path_has_trailing_slash(dest):
+                dest_file = self._shell.join_path(dest, source_rel)
+            else:
+                dest_file = self._shell.join_path(dest)
+
+            # Attempt to get the remote checksum
+            remote_checksum = self._remote_checksum(tmp, dest_file)
+
+            if remote_checksum == '3':
+                # The remote_checksum was executed on a directory.
+                if content is not None:
+                    # If source was defined as content remove the temporary file and fail out.
+                    self._remove_tempfile_if_content_defined(content, content_tempfile)
+                    return dict(failed=True, msg="can not use content with a dir as dest")
+                else:
+                    # Append the relative source location to the destination and retry remote_checksum
+                    dest_file = self._shell.join_path(dest, source_rel)
+                    remote_checksum = self._remote_checksum(tmp, dest_file)
+
+            if remote_checksum != '1' and not force:
+                # remote_file does not exist so continue to next iteration.
+                continue
+
+            if local_checksum != remote_checksum:
+                # The checksums don't match and we will change or error out.
+                changed = True
+
+                # Create a tmp path if missing only if this is not recursive.
+                # If this is recursive we already have a tmp path.
+                if delete_remote_tmp:
+                    if tmp is None or "-tmp-" not in tmp:
+                        tmp = self._make_tmp_path()
+
+                # FIXME: runner shouldn't have the diff option there
+                #if self.runner.diff and not raw:
+                #    diff = self._get_diff_data(tmp, dest_file, source_full)
+                #else:
+                #    diff = {}
+                diff = {}
+
+                # FIXME: noop stuff
+                #if self.runner.noop_on_check(inject):
+                #    self._remove_tempfile_if_content_defined(content, content_tempfile)
+                #    diffs.append(diff)
+                #    changed = True
+                #    module_result = dict(changed=True)
+                #    continue
+
+                # Define a remote directory that we will copy the file to.
+                tmp_src = tmp + 'source'
+
+                if not raw:
+                    self._connection.put_file(source_full, tmp_src)
+                else:
+                    self._connection.put_file(source_full, dest_file)
+
+                # We have copied the file remotely and no longer require our content_tempfile
+                self._remove_tempfile_if_content_defined(content, content_tempfile)
+
+                # fix file permissions when the copy is done as a different user
+                if (self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root') and not raw:
+                    # argument order matches _remote_chmod(tmp, mode, path)
+                    self._remote_chmod(tmp, 'a+r', tmp_src)
+
+                if raw:
+                    # Continue to next iteration if raw is defined.
+                    continue
+
+                # Run the copy module
+
+                # src and dest here come after original and override them
+                # we pass dest only to make sure it includes trailing slash in case of recursive copy
+                new_module_args = self._task.args.copy()
+                new_module_args.update(
+                    dict(
+                        src=tmp_src,
+                        dest=dest,
+                        original_basename=source_rel,
+                    )
+                )
+
+                # FIXME: checkmode and no_log stuff
+                #if self.runner.noop_on_check(inject):
+                #    new_module_args['CHECKMODE'] = True
+                #if self.runner.no_log:
+                #    new_module_args['NO_LOG'] = True
+
+                module_return = self._execute_module(module_name='copy', module_args=new_module_args, tmp=tmp, delete_remote_tmp=delete_remote_tmp)
+                module_executed = True
+
+            else:
+                # no need to transfer the file, already correct hash, but still need to call
+                # the file module in case we want to change attributes
+                self._remove_tempfile_if_content_defined(content, content_tempfile)
+
+                if raw:
+                    # Continue to next iteration if raw is defined.
+                    # self._remove_tmp_path(tmp)
+                    continue
+
+                # Build temporary module_args.
+                new_module_args = self._task.args.copy()
+                new_module_args.update(
+                    dict(
+                        src=source_rel,
+                        dest=dest,
+                        original_basename=source_rel
+                    )
+                )
+                # FIXME: checkmode and no_log stuff
+                #if self.runner.noop_on_check(inject):
+                #    new_module_args['CHECKMODE'] = True
+                #if self.runner.no_log:
+                #    new_module_args['NO_LOG'] = True
+
+                # Execute the file module.
+                module_return = self._execute_module(module_name='file', module_args=new_module_args, tmp=tmp, delete_remote_tmp=delete_remote_tmp)
+                module_executed = True
+
+            if not module_return.get('checksum'):
+                module_return['checksum'] = local_checksum
|
||||||
|
if module_return.get('failed') == True:
|
||||||
|
return module_return
|
||||||
|
if module_return.get('changed') == True:
|
||||||
|
changed = True
|
||||||
|
|
||||||
|
# Delete tmp path if we were recursive or if we did not execute a module.
|
||||||
|
if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
|
||||||
|
or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
|
||||||
|
self._remove_tmp_path(tmp)
|
||||||
|
|
||||||
|
# the file module returns the file path as 'path', but
|
||||||
|
# the copy module uses 'dest', so add it if it's not there
|
||||||
|
if 'path' in module_return and 'dest' not in module_return:
|
||||||
|
module_return['dest'] = module_return['path']
|
||||||
|
|
||||||
|
# TODO: Support detailed status/diff for multiple files
|
||||||
|
if len(source_files) == 1:
|
||||||
|
result = module_return
|
||||||
|
else:
|
||||||
|
result = dict(dest=dest, src=source, changed=changed)
|
||||||
|
|
||||||
|
# FIXME: move diffs into the result?
|
||||||
|
#if len(diffs) == 1:
|
||||||
|
# return ReturnData(conn=conn, result=result, diff=diffs[0])
|
||||||
|
#else:
|
||||||
|
# return ReturnData(conn=conn, result=result)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _create_content_tempfile(self, content):
|
||||||
|
''' Create a tempfile containing defined content '''
|
||||||
|
fd, content_tempfile = tempfile.mkstemp()
|
||||||
|
f = os.fdopen(fd, 'w')
|
||||||
|
try:
|
||||||
|
f.write(content)
|
||||||
|
except Exception, err:
|
||||||
|
os.remove(content_tempfile)
|
||||||
|
raise Exception(err)
|
||||||
|
finally:
|
||||||
|
f.close()
|
||||||
|
return content_tempfile
|
||||||
|
|
||||||
|
def _get_diff_data(self, tmp, destination, source):
|
||||||
|
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), persist_files=True)
|
||||||
|
if 'failed' in peek_result and peek_result['failed'] or peek_result.get('rc', 0) != 0:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
diff = {}
|
||||||
|
if peek_result['state'] == 'absent':
|
||||||
|
diff['before'] = ''
|
||||||
|
elif peek_result['appears_binary']:
|
||||||
|
diff['dst_binary'] = 1
|
||||||
|
# FIXME: this should not be in utils..
|
||||||
|
#elif peek_result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
|
||||||
|
# diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
|
||||||
|
else:
|
||||||
|
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), tmp=tmp, persist_files=True)
|
||||||
|
if 'content' in dest_result:
|
||||||
|
dest_contents = dest_result['content']
|
||||||
|
if dest_result['encoding'] == 'base64':
|
||||||
|
dest_contents = base64.b64decode(dest_contents)
|
||||||
|
else:
|
||||||
|
raise Exception("unknown encoding, failed: %s" % dest_result)
|
||||||
|
diff['before_header'] = destination
|
||||||
|
diff['before'] = dest_contents
|
||||||
|
|
||||||
|
src = open(source)
|
||||||
|
src_contents = src.read(8192)
|
||||||
|
st = os.stat(source)
|
||||||
|
if "\x00" in src_contents:
|
||||||
|
diff['src_binary'] = 1
|
||||||
|
# FIXME: this should not be in utils
|
||||||
|
#elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
|
||||||
|
# diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
|
||||||
|
else:
|
||||||
|
src.seek(0)
|
||||||
|
diff['after_header'] = source
|
||||||
|
diff['after'] = src.read()
|
||||||
|
|
||||||
|
return diff
|
||||||
|
|
||||||
|
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
|
||||||
|
if content is not None:
|
||||||
|
os.remove(content_tempfile)
|
||||||
|
|
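
The transfer decision above hinges entirely on comparing a local checksum against the remote one, so files that already match are never re-copied. Below is a minimal stand-alone sketch of that comparison, assuming SHA-1 file hashing (which is what ansible's checksum() helper uses); the sentinel strings '1' (remote file missing) and '3' (remote path is a directory) are taken from the code above:

import hashlib
import os

def local_checksum(path, chunk_size=65536):
    """Hash a local file the way the copy action does; None if it is missing."""
    if not os.path.exists(path):
        return None
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(chunk_size), b''):
            digest.update(block)
    return digest.hexdigest()

def needs_transfer(local_sum, remote_sum, force=True):
    """Mirror the skip logic: '1' means the remote file does not exist."""
    if remote_sum != '1' and not force:
        return False                # remote file exists and force is off
    return local_sum != remote_sum  # transfer only when the hashes differ
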

v2/ansible/plugins/action/debug.py (new file, 46 lines)
@ -0,0 +1,46 @@
# Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.template import Templar

class ActionModule(ActionBase):
    ''' Print statements during execution '''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=dict()):

        if 'msg' in self._task.args:
            if 'fail' in self._task.args and boolean(self._task.args['fail']):
                result = dict(failed=True, msg=self._task.args['msg'])
            else:
                result = dict(msg=self._task.args['msg'])
        # FIXME: move the LOOKUP_REGEX somewhere else
        elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
            templar = Templar(variables=task_vars)
            results = templar.template(self._task.args['var'], convert_bare=True)
            result = dict()
            result[self._task.args['var']] = results
        else:
            result = dict(msg='here we are')

        # force flag to make debug output module always verbose
        result['verbose_always'] = True

        return result
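
run() here is a pure dispatch on the task's arguments: msg (optionally combined with fail) wins over var, and anything else falls back to a placeholder message. A rough stand-in that exercises the same branching without the plugin machinery; the render callback stands in for Templar.template, and the truthiness check is only a loose approximation of the boolean() helper:

def debug_result(args, render=lambda expr: expr):
    """Mimic the debug action's argument handling; render stands in for templating."""
    if 'msg' in args:
        if args.get('fail') in (True, 'yes', 'true', '1'):
            return dict(failed=True, msg=args['msg'], verbose_always=True)
        return dict(msg=args['msg'], verbose_always=True)
    if 'var' in args:
        return {args['var']: render(args['var']), 'verbose_always': True}
    return dict(msg='here we are', verbose_always=True)

print(debug_result(dict(msg='hello')))    # {'msg': 'hello', 'verbose_always': True}
print(debug_result(dict(var='foo')))      # {'foo': 'foo', 'verbose_always': True}
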

v2/ansible/plugins/action/include_vars.py (new file, 48 lines)
@ -0,0 +1,48 @@
# (c) 2013-2014, Benno Joy <benno@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import os

from types import NoneType

from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.action import ActionBase

class ActionModule(ActionBase):

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=dict()):

        source = self._task.args.get('_raw_params')

        if self._task._role:
            source = self._loader.path_dwim_relative(self._task._role.get('_role_path',''), 'vars', source)
        else:
            source = self._loader.path_dwim(source)

        if os.path.exists(source):
            data = self._loader.load_from_file(source)
            if data is None:
                data = {}
            if not isinstance(data, dict):
                raise AnsibleError("%s must be stored as a dictionary/hash" % source)
            return dict(ansible_facts=data)
        else:
            return dict(failed=True, msg="Source file not found.", file=source)

v2/ansible/plugins/action/normal.py (new file, 40 lines)
@ -0,0 +1,40 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible.plugins.action import ActionBase

class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=dict()):

        # FIXME: a lot of this should pretty much go away with module
        #        args being stored within the task being run itself

        #if self.runner.noop_on_check(inject):
        #    if module_name in [ 'shell', 'command' ]:
        #        return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for %s' % module_name))
        #    # else let the module parsing code decide, though this will only be allowed for AnsibleModuleCommon using
        #    # python modules for now
        #    module_args += " CHECKMODE=True"

        #if self.runner.no_log:
        #    module_args += " NO_LOG=True"

        #vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
        # pass tmp by keyword; the first positional argument of _execute_module is module_name
        return self._execute_module(tmp=tmp)

@ -1,4 +1,4 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2013 Dag Wieers <dag@wieers.com>
 #
 # This file is part of Ansible
 #
@ -15,19 +15,12 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

-from ansible.utils import template
-import ansible.utils as utils
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase

-class LookupModule(object):
+class ActionModule(ActionBase):

-    def __init__(self, basedir=None, **kwargs):
-        self.basedir = basedir
+    TRANSFERS_FILES = False

-    def run(self, terms, inject=None, **kwargs):
-
-        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
-        ret = []
-        for term in terms:
-            ret.append(template.template_from_file(self.basedir, term, inject))
-        return ret
+    def run(self, tmp=None, task_vars=dict()):
+        return dict(changed=True, ansible_facts=self._task.args)

@ -19,3 +19,86 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.utils.display import Display

__all__ = ["CallbackBase"]

class CallbackBase:

    '''
    This is a base ansible callback class that does nothing. New callbacks should
    use this class as a base and override any callback methods they wish to execute
    custom actions.
    '''

    # FIXME: the list of functions here needs to be updated once we have
    #        finalized the list of callback methods used in the default callback

    def __init__(self):
        self._display = Display()

    def set_connection_info(self, conn_info):
        # FIXME: this is a temporary hack, as the connection info object
        #        should be created early and passed down through objects
        self._display._verbosity = conn_info.verbosity

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        pass

    def runner_on_ok(self, host, res):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        pass

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        pass

    def playbook_on_stats(self, stats):
        pass
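
Since CallbackBase implements every hook as a no-op, a plugin only overrides the events it cares about. A minimal sketch of a custom callback under that contract (the per-host counting logic is illustrative only, not part of the commit):

from ansible.plugins.callback import CallbackBase

class FailureCounter(CallbackBase):
    """Counts per-host failures; every hook not overridden stays a no-op."""

    def __init__(self):
        super(FailureCounter, self).__init__()
        self.failures = {}

    def runner_on_failed(self, host, res, ignore_errors=False):
        if not ignore_errors:
            self.failures[host] = self.failures.get(host, 0) + 1

    def playbook_on_stats(self, stats):
        for host, count in sorted(self.failures.items()):
            self._display.display("%s: %d failed task(s)" % (host, count))
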

v2/ansible/plugins/callback/default.py (new file, 120 lines)
@ -0,0 +1,120 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.plugins.callback import CallbackBase

class CallbackModule(CallbackBase):

    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.
    '''

    def _print_banner(self, msg, color=None):
        '''
        Prints a header-looking line with stars taking up to 80 columns
        of width (3 columns, minimum)
        '''
        msg = msg.strip()
        star_len = (80 - len(msg))
        if star_len < 0:
            star_len = 3
        stars = "*" * star_len
        self._display.display("\n%s %s" % (msg, stars), color=color)

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, task, result, ignore_errors=False):
        self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), result._result), color='red')

    def runner_on_ok(self, task, result):
        msg = "ok: [%s]" % result._host.get_name()
        if self._display._verbosity > 0 or 'verbose_always' in result._result:
            if 'verbose_always' in result._result:
                del result._result['verbose_always']
            msg += " => %s" % result._result
        self._display.display(msg, color='green')

    def runner_on_skipped(self, task, result):
        msg = "SKIPPED: [%s]" % result._host.get_name()
        if self._display._verbosity > 0 or 'verbose_always' in result._result:
            if 'verbose_always' in result._result:
                del result._result['verbose_always']
            msg += " => %s" % result._result
        self._display.display(msg)

    def runner_on_unreachable(self, task, result):
        self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red')

    def runner_on_no_hosts(self, task):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        self._print_banner("NO MORE HOSTS LEFT")

    def playbook_on_task_start(self, name, is_conditional):
        self._print_banner("TASK [%s]" % name.strip())

    def playbook_on_cleanup_task_start(self, name):
        self._print_banner("CLEANUP TASK [%s]" % name.strip())

    def playbook_on_handler_task_start(self, name):
        self._print_banner("RUNNING HANDLER [%s]" % name.strip())

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        self._print_banner("PLAY [%s]" % name.strip())

    def playbook_on_stats(self, stats):
        pass
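
The _print_banner helper pads each header out to roughly 80 columns with stars, falling back to a minimum of three stars when the message itself is long. The arithmetic in isolation:

def banner(msg):
    # pad with stars toward 80 columns; never fewer than 3 stars
    msg = msg.strip()
    star_len = 80 - len(msg)
    if star_len < 0:
        star_len = 3
    return "%s %s" % (msg, "*" * star_len)

print(banner("PLAY [webservers]"))  # message is 17 chars, so 63 trailing stars
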

v2/ansible/plugins/callback/minimal.py (new file, 111 lines)
@ -0,0 +1,111 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):

    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.
    '''

    def _print_banner(self, msg):
        '''
        Prints a header-looking line with stars taking up to 80 columns
        of width (3 columns, minimum)
        '''
        msg = msg.strip()
        star_len = (80 - len(msg))
        if star_len < 0:
            star_len = 3
        stars = "*" * star_len
        self._display.display("\n%s %s\n" % (msg, stars))

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, task, result, ignore_errors=False):
        self._display.display("%s | FAILED! => %s" % (result._host.get_name(), result._result), color='red')

    def runner_on_ok(self, task, result):
        self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), json.dumps(result._result, indent=4)), color='green')

    def runner_on_skipped(self, task, result):
        pass

    def runner_on_unreachable(self, task, result):
        self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')

    def runner_on_no_hosts(self, task):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_cleanup_task_start(self, name):
        pass

    def playbook_on_handler_task_start(self, name):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        pass

    def playbook_on_stats(self, stats):
        pass

@ -19,3 +19,24 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible import constants as C

# FIXME: this object should be created upfront and passed through
#        the entire chain of calls to here, as there are other things
#        which may want to output display/logs too
from ansible.utils.display import Display

__all__ = ['ConnectionBase']


class ConnectionBase:
    '''
    A base class for connections to contain common code.
    '''

    def __init__(self, host, connection_info, *args, **kwargs):
        self._host = host
        self._connection_info = connection_info
        self._has_pipelining = False
        self._display = Display(connection_info)
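
With ConnectionBase holding only the shared state, a concrete transport supplies the actual I/O. A skeletal sketch of what a subclass could look like under this base; the method names follow the connection plugins below, and the echoed output is obviously a placeholder, not a real transport:

from ansible.plugins.connections import ConnectionBase

class Connection(ConnectionBase):
    ''' illustrative no-op transport built on ConnectionBase '''

    def connect(self):
        # real plugins open sockets or processes here; nothing to do for a stub
        return self

    def exec_command(self, cmd, tmp_path, executable='/bin/sh', **kwargs):
        # return (rc, stdin, stdout, stderr) the way the real transports do
        return (0, '', 'would run: %s' % cmd, '')

    def put_file(self, in_path, out_path):
        pass

    def fetch_file(self, in_path, out_path):
        pass

    def close(self):
        pass
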

v2/ansible/plugins/connections/accelerate.py (new file, 371 lines)
@ -0,0 +1,371 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import json
import os
import base64
import socket
import struct
import time
from ansible.callbacks import vvv, vvvv
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
from ansible import utils
from ansible import constants

# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (8 bytes)
# ((1400-8)/4)*3) = 1044
# which leaves room for the TCP/IP header. We set this to a
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20

class Connection(object):
    ''' raw socket accelerated connection '''

    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):

        self.runner = runner
        self.host = host
        self.context = None
        self.conn = None
        self.user = user
        self.key = utils.key_for_hostname(host)
        self.port = port[0]
        self.accport = port[1]
        self.is_connected = False
        self.has_pipelining = False

        if not self.port:
            self.port = constants.DEFAULT_REMOTE_PORT
        elif not isinstance(self.port, int):
            self.port = int(self.port)

        if not self.accport:
            self.accport = constants.ACCELERATE_PORT
        elif not isinstance(self.accport, int):
            self.accport = int(self.accport)

        if self.runner.original_transport == "paramiko":
            self.ssh = ParamikoConnection(
                runner=self.runner,
                host=self.host,
                port=self.port,
                user=self.user,
                password=password,
                private_key_file=private_key_file
            )
        else:
            self.ssh = SSHConnection(
                runner=self.runner,
                host=self.host,
                port=self.port,
                user=self.user,
                password=password,
                private_key_file=private_key_file
            )

        if not getattr(self.ssh, 'shell', None):
            self.ssh.shell = utils.plugins.shell_loader.get('sh')

        # attempt to work around shared-memory funness
        if getattr(self.runner, 'aes_keys', None):
            utils.AES_KEYS = self.runner.aes_keys

    def _execute_accelerate_module(self):
        args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
            base64.b64encode(self.key.__str__()),
            str(self.accport),
            constants.ACCELERATE_DAEMON_TIMEOUT,
            int(utils.VERBOSITY),
            self.runner.accelerate_ipv6,
        )
        if constants.ACCELERATE_MULTI_KEY:
            args += " multi_key=yes"
        inject = dict(password=self.key)
        if getattr(self.runner, 'accelerate_inventory_host', False):
            inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
        else:
            inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
        vvvv("attempting to start up the accelerate daemon...")
        self.ssh.connect()
        tmp_path = self.runner._make_tmp_path(self.ssh)
        return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)

    def connect(self, allow_ssh=True):
        ''' activates the connection object '''

        try:
            if not self.is_connected:
                wrong_user = False
                tries = 3
                self.conn = socket.socket()
                self.conn.settimeout(constants.ACCELERATE_CONNECT_TIMEOUT)
                vvvv("attempting connection to %s via the accelerated port %d" % (self.host,self.accport))
                while tries > 0:
                    try:
                        self.conn.connect((self.host,self.accport))
                        break
                    except socket.error:
                        vvvv("connection to %s failed, retrying..." % self.host)
                        time.sleep(0.1)
                        tries -= 1
                if tries == 0:
                    vvv("Could not connect via the accelerated connection, exceeded # of tries")
                    raise AnsibleError("FAILED")
                elif wrong_user:
                    vvv("Restarting daemon with a different remote_user")
                    raise AnsibleError("WRONG_USER")

                self.conn.settimeout(constants.ACCELERATE_TIMEOUT)
                if not self.validate_user():
                    # the accelerated daemon was started with a
                    # different remote_user. The above command
                    # should have caused the accelerate daemon to
                    # shutdown, so we'll reconnect.
                    wrong_user = True

        except AnsibleError, e:
            if allow_ssh:
                if "WRONG_USER" in e:
                    vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
                    time.sleep(5)
                vvv("Falling back to ssh to startup accelerated mode")
                res = self._execute_accelerate_module()
                if not res.is_successful():
                    raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg')))
                return self.connect(allow_ssh=False)
            else:
                raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport))
        self.is_connected = True
        return self

    def send_data(self, data):
        packed_len = struct.pack('!Q',len(data))
        return self.conn.sendall(packed_len + data)

    def recv_data(self):
        header_len = 8 # size of a packed unsigned long long
        data = b""
        try:
            vvvv("%s: in recv_data(), waiting for the header" % self.host)
            while len(data) < header_len:
                d = self.conn.recv(header_len - len(data))
                if not d:
                    vvvv("%s: received nothing, bailing out" % self.host)
                    return None
                data += d
            vvvv("%s: got the header, unpacking" % self.host)
            data_len = struct.unpack('!Q',data[:header_len])[0]
            data = data[header_len:]
            vvvv("%s: data received so far (expecting %d): %d" % (self.host,data_len,len(data)))
            while len(data) < data_len:
                d = self.conn.recv(data_len - len(data))
                if not d:
                    vvvv("%s: received nothing, bailing out" % self.host)
                    return None
                vvvv("%s: received %d bytes" % (self.host, len(d)))
                data += d
            vvvv("%s: received all of the data, returning" % self.host)
            return data
        except socket.timeout:
            raise AnsibleError("timed out while waiting to receive data")

    def validate_user(self):
        '''
        Checks the remote uid of the accelerated daemon vs. the
        one specified for this play and will cause the accel
        daemon to exit if they don't match
        '''

        vvvv("%s: sending request for validate_user" % self.host)
        data = dict(
            mode='validate_user',
            username=self.user,
        )
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("Failed to send command to %s" % self.host)

        vvvv("%s: waiting for validate_user response" % self.host)
        while True:
            # we loop here while waiting for the response, because a
            # long running command may cause us to receive keepalive packets
            # ({"pong":"true"}) rather than the response we want.
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self.host)
            response = utils.decrypt(self.key, response)
            response = utils.parse_json(response)
            if "pong" in response:
                # it's a keepalive, go back to waiting
                vvvv("%s: received a keepalive packet" % self.host)
                continue
            else:
                vvvv("%s: received the validate_user response: %s" % (self.host, response))
                break

        if response.get('failed'):
            return False
        else:
            return response.get('rc') == 0

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the remote host '''

        if su or su_user:
            raise AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        if executable == "":
            executable = constants.DEFAULT_EXECUTABLE

        if self.runner.sudo and sudoable and sudo_user:
            cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)

        vvv("EXEC COMMAND %s" % cmd)

        data = dict(
            mode='command',
            cmd=cmd,
            tmp_path=tmp_path,
            executable=executable,
        )
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("Failed to send command to %s" % self.host)

        while True:
            # we loop here while waiting for the response, because a
            # long running command may cause us to receive keepalive packets
            # ({"pong":"true"}) rather than the response we want.
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self.host)
            response = utils.decrypt(self.key, response)
            response = utils.parse_json(response)
            if "pong" in response:
                # it's a keepalive, go back to waiting
                vvvv("%s: received a keepalive packet" % self.host)
                continue
            else:
                vvvv("%s: received the response" % self.host)
                break

        return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))

    def put_file(self, in_path, out_path):

        ''' transfer a file from local to remote '''
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)

        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)

        fd = file(in_path, 'rb')
        fstat = os.stat(in_path)
        try:
            vvv("PUT file is %d bytes" % fstat.st_size)
            last = False
            while fd.tell() <= fstat.st_size and not last:
                vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
                data = fd.read(CHUNK_SIZE)
                if fd.tell() >= fstat.st_size:
                    last = True
                data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
                if self.runner.sudo:
                    data['user'] = self.runner.sudo_user
                data = utils.jsonify(data)
                data = utils.encrypt(self.key, data)

                if self.send_data(data):
                    raise AnsibleError("failed to send the file to %s" % self.host)

                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self.host)
                response = utils.decrypt(self.key, response)
                response = utils.parse_json(response)

                if response.get('failed',False):
                    raise AnsibleError("failed to put the file in the requested location")
        finally:
            fd.close()
            vvvv("waiting for final response after PUT")
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self.host)
            response = utils.decrypt(self.key, response)
            response = utils.parse_json(response)

            if response.get('failed',False):
                raise AnsibleError("failed to put the file in the requested location")

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)

        data = dict(mode='fetch', in_path=in_path)
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("failed to initiate the file fetch with %s" % self.host)

        fh = open(out_path, "w")
        try:
            bytes = 0
            while True:
                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self.host)
                response = utils.decrypt(self.key, response)
                response = utils.parse_json(response)
                if response.get('failed', False):
                    raise AnsibleError("Error during file fetch, aborting")
                out = base64.b64decode(response['data'])
                fh.write(out)
                bytes += len(out)
                # send an empty response back to signify we
                # received the last chunk without errors
                data = utils.jsonify(dict())
                data = utils.encrypt(self.key, data)
                if self.send_data(data):
                    raise AnsibleError("failed to send ack during file fetch")
                if response.get('last', False):
                    break
        finally:
            # we don't currently care about this final response,
            # we just receive it and drop it. It may be used at some
            # point in the future or we may just have the put/fetch
            # operations not send back a final response at all
            response = self.recv_data()
            vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
            fh.close()

    def close(self):
        ''' terminate the connection '''
        # Be a good citizen
        try:
            self.conn.close()
        except:
            pass
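
Underneath the encryption, the accelerate wire protocol is plain length-prefixed framing: every message is preceded by its size packed as a network-order unsigned 64-bit integer (struct.pack('!Q', ...)), and recv_data loops because recv() may return short reads. The CHUNK_SIZE comment above is the same arithmetic from the other direction: (1400 - 8) / 4 * 3 = 1044 bytes of raw file data fit in one ~1400-byte frame once base64 expands them by 4/3. A self-contained sketch of the framing over a local socket pair:

import socket
import struct

def send_frame(sock, payload):
    # 8-byte big-endian length header, then the payload itself
    sock.sendall(struct.pack('!Q', len(payload)) + payload)

def recv_exact(sock, n):
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:           # peer closed mid-frame
            return None
        buf += chunk
    return buf

def recv_frame(sock):
    header = recv_exact(sock, 8)
    if header is None:
        return None
    (length,) = struct.unpack('!Q', header)
    return recv_exact(sock, length)

a, b = socket.socketpair()
send_frame(a, b'{"mode": "command", "cmd": "uptime"}')
print(recv_frame(b))  # -> b'{"mode": "command", "cmd": "uptime"}'
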

v2/ansible/plugins/connections/chroot.py (new file, 130 lines)
@ -0,0 +1,130 @@
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import distutils.spawn
import traceback
import os
import shutil
import subprocess
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv

class Connection(object):
    ''' Local chroot based connections '''

    def __init__(self, runner, host, port, *args, **kwargs):
        self.chroot = host
        self.has_pipelining = False

        if os.geteuid() != 0:
            raise errors.AnsibleError("chroot connection requires running as root")

        # we're running as root on the local system so do some
        # trivial checks for ensuring 'host' is actually a chroot'able dir
        if not os.path.isdir(self.chroot):
            raise errors.AnsibleError("%s is not a directory" % self.chroot)

        chrootsh = os.path.join(self.chroot, 'bin/sh')
        if not utils.is_executable(chrootsh):
            raise errors.AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)

        self.chroot_cmd = distutils.spawn.find_executable('chroot')
        if not self.chroot_cmd:
            raise errors.AnsibleError("chroot command not found in PATH")

        self.runner = runner
        self.host = host
        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the chroot; nothing to do here '''

        vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)

        return self

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the chroot '''

        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter chroot as root so sudo stuff can be ignored

        if executable:
            local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
        else:
            local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.chroot)
        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to chroot '''

        if not out_path.startswith(os.path.sep):
            out_path = os.path.join(os.path.sep, out_path)
        normpath = os.path.normpath(out_path)
        out_path = os.path.join(self.chroot, normpath[1:])

        vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from chroot to local '''

        if not in_path.startswith(os.path.sep):
            in_path = os.path.join(os.path.sep, in_path)
        normpath = os.path.normpath(in_path)
        in_path = os.path.join(self.chroot, normpath[1:])

        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
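
Both put_file and fetch_file above map an absolute in-jail path onto the host filesystem by forcing it absolute, normalizing away any '..' segments, and re-rooting it under the chroot directory. The transformation in isolation (pure path arithmetic, so no actual chroot is needed to try it; the /srv/jail path is just an example):

import os

def chroot_path(chroot, path):
    # force the path absolute, squash '..' segments, then re-root it
    if not path.startswith(os.path.sep):
        path = os.path.join(os.path.sep, path)
    normpath = os.path.normpath(path)
    return os.path.join(chroot, normpath[1:])

print(chroot_path('/srv/jail', 'etc/hosts'))          # /srv/jail/etc/hosts
print(chroot_path('/srv/jail', '/tmp/../etc/hosts'))  # /srv/jail/etc/hosts
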

v2/ansible/plugins/connections/fireball.py (new file, 151 lines)
@ -0,0 +1,151 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import json
import os
import base64
from ansible.callbacks import vvv
from ansible import utils
from ansible import errors
from ansible import constants

HAVE_ZMQ=False

try:
    import zmq
    HAVE_ZMQ=True
except ImportError:
    pass

class Connection(object):
    ''' ZeroMQ accelerated connection '''

    def __init__(self, runner, host, port, *args, **kwargs):

        self.runner = runner
        self.has_pipelining = False

        # attempt to work around shared-memory funness
        if getattr(self.runner, 'aes_keys', None):
            utils.AES_KEYS = self.runner.aes_keys

        self.host = host
        self.key = utils.key_for_hostname(host)
        self.context = None
        self.socket = None

        if port is None:
            self.port = constants.ZEROMQ_PORT
        else:
            self.port = port

    def connect(self):
        ''' activates the connection object '''

        if not HAVE_ZMQ:
            raise errors.AnsibleError("zmq is not installed")

        # this is rough/temporary and will likely be optimized later ...
        self.context = zmq.Context()
        socket = self.context.socket(zmq.REQ)
        addr = "tcp://%s:%s" % (self.host, self.port)
        socket.connect(addr)
        self.socket = socket

        return self

    def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=None):
        ''' run a command on the remote host '''

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        vvv("EXEC COMMAND %s" % cmd)

        if (self.runner.sudo and sudoable) or (self.runner.su and su):
            raise errors.AnsibleError(
                "When using fireball, do not specify sudo or su to run your tasks. " +
                "Instead sudo the fireball action with sudo. " +
                "Task will communicate with the fireball already running in sudo mode."
            )

        data = dict(
            mode='command',
            cmd=cmd,
            tmp_path=tmp_path,
            executable=executable,
        )
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        self.socket.send(data)

        response = self.socket.recv()
        response = utils.decrypt(self.key, response)
        response = utils.parse_json(response)

        return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))

    def put_file(self, in_path, out_path):

        ''' transfer a file from local to remote '''
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)

        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        data = file(in_path).read()
        data = base64.b64encode(data)

        data = dict(mode='put', data=data, out_path=out_path)
        # TODO: support chunked file transfer
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        self.socket.send(data)

        response = self.socket.recv()
        response = utils.decrypt(self.key, response)
        response = utils.parse_json(response)

        # no meaningful response needed for this

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)

        data = dict(mode='fetch', in_path=in_path)
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        self.socket.send(data)

        response = self.socket.recv()
        response = utils.decrypt(self.key, response)
        response = utils.parse_json(response)
        response = response['data']
        response = base64.b64decode(response)

        fh = open(out_path, "w")
        fh.write(response)
        fh.close()

    def close(self):
        ''' terminate the connection '''
        # Be a good citizen
        try:
            self.socket.close()
            self.context.term()
        except:
            pass
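
The fireball transport leans on ZeroMQ's REQ/REP pattern, which enforces the strict send-then-receive lockstep the code above relies on: every self.socket.send(data) is followed by exactly one self.socket.recv(). A minimal round-trip sketch of that pattern, assuming pyzmq is installed; the port number is arbitrary and the payloads are unencrypted stand-ins for the AES blobs the plugin actually exchanges:

import zmq

ctx = zmq.Context()

# the daemon side: a REP socket must recv, then send
rep = ctx.socket(zmq.REP)
rep.bind("tcp://127.0.0.1:5555")

# the client side: a REQ socket must send, then recv
req = ctx.socket(zmq.REQ)
req.connect("tcp://127.0.0.1:5555")

req.send(b'{"mode": "command", "cmd": "uptime"}')
request = rep.recv()                 # daemon receives the request blob
rep.send(b'{"rc": 0, "stdout": "up 10 days"}')
print(req.recv())                    # client gets exactly one reply per request

req.close(); rep.close(); ctx.term()
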

v2/ansible/plugins/connections/funcd.py (new file, 99 lines)
@ -0,0 +1,99 @@
|
||||||
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
|
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||||
|
# (c) 2013, Michael Scherer <misc@zarb.org>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# ---
|
||||||
|
# The func transport permit to use ansible over func. For people who have already setup
|
||||||
|
# func and that wish to play with ansible, this permit to move gradually to ansible
|
||||||
|
# without having to redo completely the setup of the network.
|
||||||
|
|
||||||
|
HAVE_FUNC=False
|
||||||
|
try:
|
||||||
|
import func.overlord.client as fc
|
||||||
|
HAVE_FUNC=True
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
import os
|
||||||
|
from ansible.callbacks import vvv
|
||||||
|
from ansible import errors
|
||||||
|
import tempfile
|
||||||
|
import shutil
|
||||||
|
|
||||||
|
|
||||||
|
class Connection(object):
|
||||||
|
''' Func-based connections '''
|
||||||
|
|
||||||
|
def __init__(self, runner, host, port, *args, **kwargs):
|
||||||
|
self.runner = runner
|
||||||
|
self.host = host
|
||||||
|
self.has_pipelining = False
|
||||||
|
# port is unused, this go on func
|
||||||
|
self.port = port
|
||||||
|
|
||||||
|
def connect(self, port=None):
|
||||||
|
if not HAVE_FUNC:
|
||||||
|
raise errors.AnsibleError("func is not installed")
|
||||||
|
|
||||||
|
self.client = fc.Client(self.host)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False,
|
||||||
|
executable='/bin/sh', in_data=None, su=None, su_user=None):
|
||||||
|
''' run a command on the remote minion '''
|
||||||
|
|
||||||
|
if su or su_user:
|
||||||
|
raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
|
||||||
|
|
||||||
|
if in_data:
|
||||||
|
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
|
||||||
|
|
||||||
|
vvv("EXEC %s" % (cmd), host=self.host)
|
||||||
|
p = self.client.command.run(cmd)[self.host]
|
||||||
|
return (p[0], '', p[1], p[2])
|
||||||
|
|
||||||
|
def _normalize_path(self, path, prefix):
|
||||||
|
if not path.startswith(os.path.sep):
|
||||||
|
path = os.path.join(os.path.sep, path)
|
||||||
|
normpath = os.path.normpath(path)
|
||||||
|
return os.path.join(prefix, normpath[1:])
|
||||||
|
|
||||||
|
def put_file(self, in_path, out_path):
|
||||||
|
''' transfer a file from local to remote '''
|
||||||
|
|
||||||
|
out_path = self._normalize_path(out_path, '/')
|
||||||
|
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
|
||||||
|
self.client.local.copyfile.send(in_path, out_path)
|
||||||
|
|
||||||
|
def fetch_file(self, in_path, out_path):
|
||||||
|
''' fetch a file from remote to local '''
|
||||||
|
|
||||||
|
in_path = self._normalize_path(in_path, '/')
|
||||||
|
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
|
||||||
|
# need to use a tmp dir due to difference of semantic for getfile
|
||||||
|
# ( who take a # directory as destination) and fetch_file, who
|
||||||
|
# take a file directly
|
||||||
|
tmpdir = tempfile.mkdtemp(prefix="func_ansible")
|
||||||
|
self.client.local.getfile.get(in_path, tmpdir)
|
||||||
|
shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)),
|
||||||
|
out_path)
|
||||||
|
shutil.rmtree(tmpdir)
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
''' terminate the connection; nothing to do here '''
|
||||||
|
pass
|
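For orientation, this is roughly how the func overlord API wrapped above behaves when used directly: `command.run()` returns a per-host mapping of `[rc, stdout, stderr]`, which is exactly what `exec_command` indexes. A minimal sketch, assuming a working func setup; the minion name is hypothetical.

import func.overlord.client as fc

client = fc.Client("minion.example.com")   # hypothetical minion name
rc, out, err = client.command.run("uptime")["minion.example.com"]
print("%s %s %s" % (rc, out, err))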
151  v2/ansible/plugins/connections/jail.py  Normal file

@@ -0,0 +1,151 @@
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import distutils.spawn
import traceback
import os
import shutil
import subprocess
from ansible import errors
from ansible.callbacks import vvv


class Connection(object):
    ''' Local jail based connections '''

    def _search_executable(self, executable):
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def list_jails(self):
        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()

        return stdout.split()

    def get_jail_path(self):
        p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        # remove the trailing \n
        return stdout[:-1]

    def __init__(self, runner, host, port, *args, **kwargs):
        self.jail = host
        self.runner = runner
        self.host = host
        self.has_pipelining = False

        if os.geteuid() != 0:
            raise errors.AnsibleError("jail connection requires running as root")

        self.jls_cmd = self._search_executable('jls')
        self.jexec_cmd = self._search_executable('jexec')

        if self.jail not in self.list_jails():
            raise errors.AnsibleError("incorrect jail name %s" % self.jail)

        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the jail; nothing to do here '''

        vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)

        return self

    # FIXME: to be modified
    def _generate_cmd(self, executable, cmd):
        if executable:
            local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
        else:
            local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd)
        return local_cmd

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command in the jail '''

        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter the jail as root so sudo stuff can be ignored
        local_cmd = self._generate_cmd(executable, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.jail)
        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def _copy_file(self, in_path, out_path):
        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to jail '''

        out_path = self._normalize_path(out_path, self.get_jail_path())
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)

        self._copy_file(in_path, out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from jail to local '''

        in_path = self._normalize_path(in_path, self.get_jail_path())
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)

        self._copy_file(in_path, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
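The `_normalize_path` helper (shared verbatim by the jail, funcd, and libvirt_lxc plugins) is what keeps file transfers confined to the target root: relative and dot-dot paths are first anchored at "/", normalized, and only then re-rooted under the prefix. A standalone demonstration, with a hypothetical jail path:

import os.path

def normalize_path(path, prefix):
    # anchor at "/" so os.path.normpath cannot escape upward,
    # then graft the cleaned path onto the jail/container prefix
    if not path.startswith(os.path.sep):
        path = os.path.join(os.path.sep, path)
    normpath = os.path.normpath(path)
    return os.path.join(prefix, normpath[1:])

print(normalize_path("../../etc/passwd", "/jails/web1"))  # /jails/web1/etc/passwd
print(normalize_path("tmp/file", "/jails/web1"))          # /jails/web1/tmp/file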
127  v2/ansible/plugins/connections/libvirt_lxc.py  Normal file

@@ -0,0 +1,127 @@
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import distutils.spawn
import os
import subprocess
from ansible import errors
from ansible.callbacks import vvv


class Connection(object):
    ''' Local lxc based connections '''

    def _search_executable(self, executable):
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def _check_domain(self, domain):
        p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        if p.returncode:
            raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)

    def __init__(self, runner, host, port, *args, **kwargs):
        self.lxc = host

        self.cmd = self._search_executable('virsh')

        self._check_domain(host)

        self.runner = runner
        self.host = host
        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the lxc; nothing to do here '''

        vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)

        return self

    def _generate_cmd(self, executable, cmd):
        if executable:
            local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable, '-c', cmd]
        else:
            local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
        return local_cmd

    def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command in the lxc container '''

        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter the lxc as root so sudo stuff can be ignored
        local_cmd = self._generate_cmd(executable, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to lxc '''

        out_path = self._normalize_path(out_path, '/')
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)

        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)

        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate(open(in_path, 'rb').read())

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from lxc to local '''

        in_path = self._normalize_path(in_path, '/')
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)

        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)

        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        open(out_path, 'wb').write(stdout)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
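Everything in this plugin ultimately shells out to virsh. As a sketch, the argv list `exec_command` would assemble for a container named "web1" (a hypothetical name) looks like this before it is handed to `subprocess.Popen`:

# The command list built by _generate_cmd for domain "web1" running "uptime"
cmd = ['virsh', '-q', '-c', 'lxc:///', 'lxc-enter-namespace', 'web1',
       '--', '/bin/sh', '-c', 'uptime']
print(' '.join(cmd))
# virsh -q -c lxc:/// lxc-enter-namespace web1 -- /bin/sh -c uptime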
138  v2/ansible/plugins/connections/local.py  Normal file

@@ -0,0 +1,138 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import traceback
import os
import pipes
import shutil
import subprocess
import select
import fcntl

from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase

from ansible.utils.debug import debug

class Connection(ConnectionBase):
    ''' Local based connections '''

    def get_transport(self):
        ''' used to identify this connection object '''
        return 'local'

    def connect(self, port=None):
        ''' connect to the local host; nothing to do here '''
        return self

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the local host '''

        debug("in local.exec_command()")
        # su requires being run from a terminal, and therefore isn't supported here (yet?)
        if su or su_user:
            raise AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # FIXME: su/sudo stuff needs to be generalized
        #if not self.runner.sudo or not sudoable:
        #    if executable:
        #        local_cmd = executable.split() + ['-c', cmd]
        #    else:
        #        local_cmd = cmd
        #else:
        #    local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
        if executable:
            local_cmd = executable.split() + ['-c', cmd]
        else:
            local_cmd = cmd

        executable = executable.split()[0] if executable else None

        self._display.vvv("%s EXEC %s" % (self._host, local_cmd))
        # FIXME: cwd= needs to be set to the basedir of the playbook
        debug("opening command with Popen()")
        p = subprocess.Popen(
            local_cmd,
            shell=isinstance(local_cmd, basestring),
            executable=executable, #cwd=...
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        debug("done running command with Popen()")

        # FIXME: more su/sudo stuff
        #if self.runner.sudo and sudoable and self.runner.sudo_pass:
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    sudo_output = ''
        #    while not sudo_output.endswith(prompt) and success_key not in sudo_output:
        #        rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
        #                                      [p.stdout, p.stderr], self.runner.timeout)
        #        if p.stdout in rfd:
        #            chunk = p.stdout.read()
        #        elif p.stderr in rfd:
        #            chunk = p.stderr.read()
        #        else:
        #            stdout, stderr = p.communicate()
        #            raise AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
        #        if not chunk:
        #            stdout, stderr = p.communicate()
        #            raise AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
        #        sudo_output += chunk
        #    if success_key not in sudo_output:
        #        p.stdin.write(self.runner.sudo_pass + '\n')
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        debug("getting output with communicate()")
        stdout, stderr = p.communicate()
        debug("done communicating")

        debug("done with local.exec_command()")
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to local '''

        #vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        self._display.vvv("%s PUT %s TO %s" % (self._host, in_path, out_path))
        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise AnsibleError("failed to transfer file to %s" % out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from local to local -- for compatibility '''

        #vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        self._display.vvv("%s FETCH %s TO %s" % (self._host, in_path, out_path))
        self.put_file(in_path, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
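All of the plugins in this commit share one contract: `exec_command` returns a four-tuple of `(rc, '', stdout, stderr)`, and `put_file`/`fetch_file` raise `AnsibleError` subclasses on failure. A minimal sketch of how a caller consumes that contract; `conn` stands for any connected `Connection` instance, and the `run_and_check` helper is hypothetical, not part of the commit.

def run_and_check(conn, cmd):
    # the second tuple slot is unused by these plugins and always ''
    rc, _, out, err = conn.exec_command(cmd, tmp_path='/tmp')
    if rc != 0:
        raise RuntimeError("command failed (rc=%s): %s" % (rc, err))
    return out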
417  v2/ansible/plugins/connections/paramiko_ssh.py  Normal file

@@ -0,0 +1,417 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# ---
# The paramiko transport is provided because many distributions, in particular EL6 and before,
# do not support ControlPersist in their SSH implementations. This is needed on the Ansible
# control machine to be reasonably efficient with connections. Thus paramiko is faster
# for most users on these platforms. Users with ControlPersist capability can consider
# using -c ssh or configuring the transport in ansible.cfg.

import warnings
import os
import pipes
import socket
import random
import logging
import tempfile
import traceback
import fcntl
import re
import sys
from termios import tcflush, TCIFLUSH
from binascii import hexlify
from ansible.callbacks import vvv
from ansible import errors
from ansible import utils
from ansible import constants as C

AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""

# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
HAVE_PARAMIKO=False
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    try:
        import paramiko
        HAVE_PARAMIKO=True
        logging.getLogger("paramiko").setLevel(logging.WARNING)
    except ImportError:
        pass

class MyAddPolicy(object):
    """
    Based on AutoAddPolicy in paramiko so we can determine when keys are added
    and also prompt for input.

    Policy for automatically adding the hostname and new host key to the
    local L{HostKeys} object, and saving it.  This is used by L{SSHClient}.
    """

    def __init__(self, runner):
        self.runner = runner

    def missing_host_key(self, client, hostname, key):

        if C.HOST_KEY_CHECKING:

            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)

            old_stdin = sys.stdin
            sys.stdin = self.runner._new_stdin
            fingerprint = hexlify(key.get_fingerprint())
            ktype = key.get_name()

            # clear out any premature input on sys.stdin
            tcflush(sys.stdin, TCIFLUSH)

            inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
            sys.stdin = old_stdin
            if inp not in ['yes', 'y', '']:
                fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
                fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
                raise errors.AnsibleError("host connection rejected by user")

            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)

        key._added_by_ansible_this_time = True

        # existing implementation below:
        client._host_keys.add(hostname, key.get_name(), key)

        # host keys are actually saved in the close() function below
        # in order to control ordering.

# keep connection objects on a per host basis to avoid repeated attempts to reconnect

SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}

class Connection(object):
    ''' SSH based connections with Paramiko '''

    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):

        self.ssh = None
        self.sftp = None
        self.runner = runner
        self.host = host
        self.port = port or 22
        self.user = user
        self.password = password
        self.private_key_file = private_key_file
        self.has_pipelining = False

    def _cache_key(self):
        return "%s__%s__" % (self.host, self.user)

    def connect(self):
        cache_key = self._cache_key()
        if cache_key in SSH_CONNECTION_CACHE:
            self.ssh = SSH_CONNECTION_CACHE[cache_key]
        else:
            self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
        return self

    def _connect_uncached(self):
        ''' activates the connection object '''

        if not HAVE_PARAMIKO:
            raise errors.AnsibleError("paramiko is not installed")

        vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host)

        ssh = paramiko.SSHClient()

        self.keyfile = os.path.expanduser("~/.ssh/known_hosts")

        if C.HOST_KEY_CHECKING:
            ssh.load_system_host_keys()

        ssh.set_missing_host_key_policy(MyAddPolicy(self.runner))

        allow_agent = True

        if self.password is not None:
            allow_agent = False

        try:

            if self.private_key_file:
                key_filename = os.path.expanduser(self.private_key_file)
            elif self.runner.private_key_file:
                key_filename = os.path.expanduser(self.runner.private_key_file)
            else:
                key_filename = None
            ssh.connect(self.host, username=self.user, allow_agent=allow_agent, look_for_keys=True,
                        key_filename=key_filename, password=self.password,
                        timeout=self.runner.timeout, port=self.port)

        except Exception, e:

            msg = str(e)
            if "PID check failed" in msg:
                raise errors.AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
            elif "Private key file is encrypted" in msg:
                msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self.user, self.host, self.port, msg)
                raise errors.AnsibleConnectionFailed(msg)
            else:
                raise errors.AnsibleConnectionFailed(msg)

        return ssh

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the remote host '''

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        bufsize = 4096

        try:

            self.ssh.get_transport().set_keepalive(5)
            chan = self.ssh.get_transport().open_session()

        except Exception, e:

            msg = "Failed to open session"
            if len(str(e)) > 0:
                msg += ": %s" % str(e)
            raise errors.AnsibleConnectionFailed(msg)

        no_prompt_out = ''
        no_prompt_err = ''
        if not (self.runner.sudo and sudoable) and not (self.runner.su and su):

            if executable:
                quoted_command = executable + ' -c ' + pipes.quote(cmd)
            else:
                quoted_command = cmd
            vvv("EXEC %s" % quoted_command, host=self.host)
            chan.exec_command(quoted_command)

        else:

            # sudo usually requires a PTY (cf. requiretty option), therefore
            # we give it one by default (pty=True in ansible.cfg), and we try
            # to initialise from the calling environment
            if C.PARAMIKO_PTY:
                chan.get_pty(term=os.getenv('TERM', 'vt100'),
                             width=int(os.getenv('COLUMNS', 0)),
                             height=int(os.getenv('LINES', 0)))
            if self.runner.sudo or sudoable:
                shcmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
            elif self.runner.su or su:
                shcmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)

            vvv("EXEC %s" % shcmd, host=self.host)
            sudo_output = ''

            try:

                chan.exec_command(shcmd)

                if self.runner.sudo_pass or self.runner.su_pass:

                    while True:

                        if success_key in sudo_output or \
                            (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
                            (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
                            break
                        chunk = chan.recv(bufsize)

                        if not chunk:
                            if 'unknown user' in sudo_output:
                                raise errors.AnsibleError(
                                    'user %s does not exist' % sudo_user)
                            else:
                                raise errors.AnsibleError('ssh connection ' +
                                    'closed waiting for password prompt')
                        sudo_output += chunk

                    if success_key not in sudo_output:

                        if sudoable:
                            chan.sendall(self.runner.sudo_pass + '\n')
                        elif su:
                            chan.sendall(self.runner.su_pass + '\n')
                    else:
                        no_prompt_out += sudo_output
                        no_prompt_err += sudo_output

            except socket.timeout:

                raise errors.AnsibleError('ssh timed out waiting for sudo.\n' + sudo_output)

        stdout = ''.join(chan.makefile('rb', bufsize))
        stderr = ''.join(chan.makefile_stderr('rb', bufsize))

        return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_err + stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''

        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)

        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)

        try:
            self.sftp = self.ssh.open_sftp()
        except Exception, e:
            raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)

        try:
            self.sftp.put(in_path, out_path)
        except IOError:
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def _connect_sftp(self):

        cache_key = "%s__%s__" % (self.host, self.user)
        if cache_key in SFTP_CONNECTION_CACHE:
            return SFTP_CONNECTION_CACHE[cache_key]
        else:
            result = SFTP_CONNECTION_CACHE[cache_key] = self.connect().ssh.open_sftp()
            return result

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''

        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)

        try:
            self.sftp = self._connect_sftp()
        except Exception, e:
            raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)

        try:
            self.sftp.get(in_path, out_path)
        except IOError:
            raise errors.AnsibleError("failed to transfer file from %s" % in_path)

    def _any_keys_added(self):

        added_any = False
        for hostname, keys in self.ssh._host_keys.iteritems():
            for keytype, key in keys.iteritems():
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    return True
        return False

    def _save_ssh_host_keys(self, filename):
        '''
        not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
        don't complain about it :)
        '''

        if not self._any_keys_added():
            return False

        path = os.path.expanduser("~/.ssh")
        if not os.path.exists(path):
            os.makedirs(path)

        f = open(filename, 'w')

        # write the previously known keys first, then the newly added ones
        for hostname, keys in self.ssh._host_keys.iteritems():

            for keytype, key in keys.iteritems():

                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if not added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        for hostname, keys in self.ssh._host_keys.iteritems():

            for keytype, key in keys.iteritems():
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        f.close()

    def close(self):
        ''' terminate the connection '''

        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)

        if self.sftp is not None:
            self.sftp.close()

        if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():

            # add any new SSH host keys -- warning -- this could be slow
            lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
            dirname = os.path.dirname(self.keyfile)
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)

            try:
                # just in case any were added recently

                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)

                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner

                key_dir = os.path.dirname(self.keyfile)
                key_stat = os.stat(self.keyfile)

                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.

                tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
                os.chmod(tmp_keyfile.name, key_stat.st_mode & 07777)
                os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid)

                self._save_ssh_host_keys(tmp_keyfile.name)
                tmp_keyfile.close()

                os.rename(tmp_keyfile.name, self.keyfile)

            except:

                # unable to save keys, including scenario when key was invalid
                # and caught earlier
                traceback.print_exc()
                pass
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

        self.ssh.close()
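The module-level `SSH_CONNECTION_CACHE`/`SFTP_CONNECTION_CACHE` dicts implement a simple per-(host, user) memoization so repeated tasks against the same host reuse one paramiko client instead of re-handshaking. The idiom, isolated as a generic sketch (the `cached` helper is hypothetical):

CACHE = {}

def cached(host, user, factory):
    # same key format as Connection._cache_key(): "host__user__"
    key = "%s__%s__" % (host, user)
    if key not in CACHE:
        CACHE[key] = factory()   # only pay the connection cost on first use
    return CACHE[key]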
487
v2/ansible/plugins/connections/ssh.py
Normal file
487
v2/ansible/plugins/connections/ssh.py
Normal file
|
@ -0,0 +1,487 @@
|
||||||
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
import shlex
|
||||||
|
import pipes
|
||||||
|
import random
|
||||||
|
import select
|
||||||
|
import fcntl
|
||||||
|
import hmac
|
||||||
|
import pwd
|
||||||
|
import gettext
|
||||||
|
import pty
|
||||||
|
from hashlib import sha1
|
||||||
|
|
||||||
|
from ansible import constants as C
|
||||||
|
from ansible.errors import AnsibleError, AnsibleConnectionFailure
|
||||||
|
from ansible.plugins.connections import ConnectionBase
|
||||||
|
|
||||||
|
class Connection(ConnectionBase):
|
||||||
|
''' ssh based connections '''
|
||||||
|
|
||||||
|
def __init__(self, host, connection_info, *args, **kwargs):
|
||||||
|
super(Connection, self).__init__(host, connection_info)
|
||||||
|
|
||||||
|
# SSH connection specific init stuff
|
||||||
|
self.HASHED_KEY_MAGIC = "|1|"
|
||||||
|
self._has_pipelining = True
|
||||||
|
|
||||||
|
# FIXME: move the lockfile locations to ActionBase?
|
||||||
|
#fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
|
||||||
|
#self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
|
||||||
|
self._cp_dir = '/tmp'
|
||||||
|
#fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
|
||||||
|
|
||||||
|
def get_transport(self):
|
||||||
|
''' used to identify this connection object from other classes '''
|
||||||
|
return 'ssh'
|
||||||
|
|
||||||
|
def connect(self):
|
||||||
|
''' connect to the remote host '''
|
||||||
|
|
||||||
|
self._display.vvv("ESTABLISH CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._host)
|
||||||
|
|
||||||
|
self._common_args = []
|
||||||
|
extra_args = C.ANSIBLE_SSH_ARGS
|
||||||
|
if extra_args is not None:
|
||||||
|
# make sure there is no empty string added as this can produce weird errors
|
||||||
|
self._common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
|
||||||
|
else:
|
||||||
|
self._common_args += [
|
||||||
|
"-o", "ControlMaster=auto",
|
||||||
|
"-o", "ControlPersist=60s",
|
||||||
|
"-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)),
|
||||||
|
]
|
||||||
|
|
||||||
|
cp_in_use = False
|
||||||
|
cp_path_set = False
|
||||||
|
for arg in self._common_args:
|
||||||
|
if "ControlPersist" in arg:
|
||||||
|
cp_in_use = True
|
||||||
|
if "ControlPath" in arg:
|
||||||
|
cp_path_set = True
|
||||||
|
|
||||||
|
if cp_in_use and not cp_path_set:
|
||||||
|
self._common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir))]
|
||||||
|
|
||||||
|
if not C.HOST_KEY_CHECKING:
|
||||||
|
self._common_args += ["-o", "StrictHostKeyChecking=no"]
|
||||||
|
|
||||||
|
if self._connection_info.port is not None:
|
||||||
|
self._common_args += ["-o", "Port=%d" % (self._connection_info.port)]
|
||||||
|
#if self.private_key_file is not None:
|
||||||
|
# self._common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
|
||||||
|
#elif self.runner.private_key_file is not None:
|
||||||
|
# self._common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
|
||||||
|
if self._connection_info.password:
|
||||||
|
self._common_args += ["-o", "GSSAPIAuthentication=no",
|
||||||
|
"-o", "PubkeyAuthentication=no"]
|
||||||
|
else:
|
||||||
|
self._common_args += ["-o", "KbdInteractiveAuthentication=no",
|
||||||
|
"-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
|
||||||
|
"-o", "PasswordAuthentication=no"]
|
||||||
|
if self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]:
|
||||||
|
self._common_args += ["-o", "User="+self._connection_info.remote_user]
|
||||||
|
# FIXME: figure out where this goes
|
||||||
|
#self._common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
|
||||||
|
self._common_args += ["-o", "ConnectTimeout=15"]
|
||||||
|
|
||||||
|
return self
|
||||||
|
|
||||||
|
def _run(self, cmd, indata):
|
||||||
|
if indata:
|
||||||
|
# do not use pseudo-pty
|
||||||
|
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
stdin = p.stdin
|
||||||
|
else:
|
||||||
|
# try to use upseudo-pty
|
||||||
|
try:
|
||||||
|
# Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
|
||||||
|
master, slave = pty.openpty()
|
||||||
|
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
stdin = os.fdopen(master, 'w', 0)
|
||||||
|
os.close(slave)
|
||||||
|
except:
|
||||||
|
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
stdin = p.stdin
|
||||||
|
|
||||||
|
return (p, stdin)
|
||||||
|
|
||||||
|
def _password_cmd(self):
|
||||||
|
if self._connection_info.password:
|
||||||
|
try:
|
||||||
|
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
p.communicate()
|
||||||
|
except OSError:
|
||||||
|
raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
|
||||||
|
(self.rfd, self.wfd) = os.pipe()
|
||||||
|
return ["sshpass", "-d%d" % self.rfd]
|
||||||
|
return []
|
||||||
|
|
||||||
|
def _send_password(self):
|
||||||
|
if self._connection_info.password:
|
||||||
|
os.close(self.rfd)
|
||||||
|
os.write(self.wfd, "%s\n" % self._connection_info.password)
|
||||||
|
os.close(self.wfd)
|
||||||
|
|
||||||
|
def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
|
||||||
|
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
|
||||||
|
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
|
||||||
|
# We can't use p.communicate here because the ControlMaster may have stdout open as well
|
||||||
|
stdout = ''
|
||||||
|
stderr = ''
|
||||||
|
rpipes = [p.stdout, p.stderr]
|
||||||
|
if indata:
|
||||||
|
try:
|
||||||
|
stdin.write(indata)
|
||||||
|
stdin.close()
|
||||||
|
except:
|
||||||
|
raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
|
||||||
|
# Read stdout/stderr from process
|
||||||
|
while True:
|
||||||
|
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
|
||||||
|
|
||||||
|
# FIXME: su/sudo stuff
|
||||||
|
# fail early if the sudo/su password is wrong
|
||||||
|
#if self.runner.sudo and sudoable:
|
||||||
|
# if self.runner.sudo_pass:
|
||||||
|
# incorrect_password = gettext.dgettext(
|
||||||
|
# "sudo", "Sorry, try again.")
|
||||||
|
# if stdout.endswith("%s\r\n%s" % (incorrect_password,
|
||||||
|
# prompt)):
|
||||||
|
# raise AnsibleError('Incorrect sudo password')
|
||||||
|
#
|
||||||
|
# if stdout.endswith(prompt):
|
||||||
|
# raise AnsibleError('Missing sudo password')
|
||||||
|
#
|
||||||
|
#if self.runner.su and su and self.runner.su_pass:
|
||||||
|
# incorrect_password = gettext.dgettext(
|
||||||
|
# "su", "Sorry")
|
||||||
|
# if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
|
||||||
|
# raise AnsibleError('Incorrect su password')
|
||||||
|
|
||||||
|
if p.stdout in rfd:
|
||||||
|
dat = os.read(p.stdout.fileno(), 9000)
|
||||||
|
stdout += dat
|
||||||
|
if dat == '':
|
||||||
|
rpipes.remove(p.stdout)
|
||||||
|
if p.stderr in rfd:
|
||||||
|
dat = os.read(p.stderr.fileno(), 9000)
|
||||||
|
stderr += dat
|
||||||
|
if dat == '':
|
||||||
|
rpipes.remove(p.stderr)
|
||||||
|
# only break out if no pipes are left to read or
|
||||||
|
# the pipes are completely read and
|
||||||
|
# the process is terminated
|
||||||
|
if (not rpipes or not rfd) and p.poll() is not None:
|
||||||
|
break
|
||||||
|
# No pipes are left to read but process is not yet terminated
|
||||||
|
# Only then it is safe to wait for the process to be finished
|
||||||
|
# NOTE: Actually p.poll() is always None here if rpipes is empty
|
||||||
|
elif not rpipes and p.poll() == None:
|
||||||
|
p.wait()
|
||||||
|
# The process is terminated. Since no pipes to read from are
|
||||||
|
# left, there is no need to call select() again.
|
||||||
|
break
|
||||||
|
# close stdin after process is terminated and stdout/stderr are read
|
||||||
|
# completely (see also issue #848)
|
||||||
|
stdin.close()
|
||||||
|
return (p.returncode, stdout, stderr)
|
||||||
|
|
||||||
|
def not_in_host_file(self, host):
|
||||||
|
if 'USER' in os.environ:
|
||||||
|
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
|
||||||
|
else:
|
||||||
|
user_host_file = "~/.ssh/known_hosts"
|
||||||
|
user_host_file = os.path.expanduser(user_host_file)
|
||||||
|
|
||||||
|
host_file_list = []
|
||||||
|
host_file_list.append(user_host_file)
|
||||||
|
host_file_list.append("/etc/ssh/ssh_known_hosts")
|
||||||
|
host_file_list.append("/etc/ssh/ssh_known_hosts2")
|
||||||
|
|
||||||
|
hfiles_not_found = 0
|
||||||
|
for hf in host_file_list:
|
||||||
|
if not os.path.exists(hf):
|
||||||
|
hfiles_not_found += 1
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
host_fh = open(hf)
|
||||||
|
except IOError, e:
|
||||||
|
hfiles_not_found += 1
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
data = host_fh.read()
|
||||||
|
host_fh.close()
|
||||||
|
|
||||||
|
for line in data.split("\n"):
|
||||||
|
if line is None or " " not in line:
|
||||||
|
continue
|
||||||
|
tokens = line.split()
|
||||||
|
if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
|
||||||
|
# this is a hashed known host entry
|
||||||
|
try:
|
||||||
|
(kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
|
||||||
|
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
|
||||||
|
hash.update(host)
|
||||||
|
if hash.digest() == kn_host.decode('base64'):
|
||||||
|
return False
|
||||||
|
except:
|
||||||
|
# invalid hashed host key, skip it
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
# standard host file entry
|
||||||
|
if host in tokens[0]:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if (hfiles_not_found == len(host_file_list)):
|
||||||
|
self._display.vvv("EXEC previous known host file not found for %s" % host)
|
||||||
|
return True
|
||||||
|
|
||||||
|
def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoable=False):
|
||||||
|
''' run a command on the remote host '''
|
||||||
|
|
||||||
|
ssh_cmd = self._password_cmd()
|
||||||
|
ssh_cmd += ["ssh", "-C"]
|
||||||
|
if not in_data:
|
||||||
|
# we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
|
||||||
|
# inside a tty automatically invokes the python interactive-mode but the modules are not
|
||||||
|
# compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
|
||||||
|
ssh_cmd += ["-tt"]
|
||||||
|
# FIXME: verbosity needs to move, most likely into connection info or
|
||||||
|
# whatever other context we pass around instead of runner objects
|
||||||
|
#if utils.VERBOSITY > 3:
|
||||||
|
# ssh_cmd += ["-vvv"]
|
||||||
|
#else:
|
||||||
|
# ssh_cmd += ["-q"]
|
||||||
|
ssh_cmd += ["-q"]
|
||||||
|
ssh_cmd += self._common_args
|
||||||
|
|
||||||
|
#if self._ipv6:
|
||||||
|
# ssh_cmd += ['-6']
|
||||||
|
ssh_cmd += [self._host.ipv4_address]
|
||||||
|
|
||||||
|
if not (self._connection_info.sudo or self._connection_info.su) or not sudoable:
|
||||||
|
prompt = None
|
||||||
|
if executable:
|
||||||
|
ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
|
||||||
|
else:
|
||||||
|
ssh_cmd.append(cmd)
|
||||||
|
elif self._connection_info.su and self._connection_info.su_user:
|
||||||
|
su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd)
|
||||||
|
ssh_cmd.append(su_cmd)
|
||||||
|
else:
|
||||||
|
# FIXME: hard-coded sudo_exe here
|
||||||
|
sudo_cmd, prompt, success_key = self._connection_info.make_sudo_cmd('/usr/bin/sudo', executable, cmd)
|
||||||
|
ssh_cmd.append(sudo_cmd)
|
||||||
|
|
||||||
|
self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._host)
|
||||||
|
|
||||||
|
not_in_host_file = self.not_in_host_file(self._host.get_name())
|
||||||
|
|
||||||
|
# FIXME: move the locations of these lock files, same as init above
|
||||||
|
#if C.HOST_KEY_CHECKING and not_in_host_file:
|
||||||
|
# # lock around the initial SSH connectivity so the user prompt about whether to add
|
||||||
|
# # the host to known hosts is not intermingled with multiprocess output.
|
||||||
|
# fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
|
||||||
|
# fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
|
||||||
|
|
||||||
|
# create process
|
||||||
|
(p, stdin) = self._run(ssh_cmd, in_data)
|
||||||
|
|
||||||
|
self._send_password()
|
||||||
|
|
||||||
|
no_prompt_out = ''
|
||||||
|
no_prompt_err = ''
|
||||||
|
# FIXME: su/sudo stuff
|
||||||
|
#if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
|
||||||
|
# (self.runner.su and su and self.runner.su_pass):
|
||||||
|
# # several cases are handled for sudo privileges with password
|
||||||
|
# # * NOPASSWD (tty & no-tty): detect success_key on stdout
|
||||||
|
# # * without NOPASSWD:
|
||||||
|
# # * detect prompt on stdout (tty)
|
||||||
|
# # * detect prompt on stderr (no-tty)
|
||||||
|
# fcntl.fcntl(p.stdout, fcntl.F_SETFL,
|
||||||
|
# fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||||
|
# fcntl.fcntl(p.stderr, fcntl.F_SETFL,
|
||||||
|
# fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||||
|
# sudo_output = ''
|
||||||
|
# sudo_errput = ''
|
||||||
|
#
|
||||||
|
# while True:
|
||||||
|
# if success_key in sudo_output or \
|
||||||
|
# (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
|
||||||
|
# (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
|
||||||
|
# break
|
||||||
|
self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._host)
|
||||||
|
|
||||||
|
not_in_host_file = self.not_in_host_file(self._host.get_name())
|
||||||
|
|
||||||
|
# FIXME: file locations
|
||||||
|
#if C.HOST_KEY_CHECKING and not_in_host_file:
|
||||||
|
# # lock around the initial SSH connectivity so the user prompt about whether to add
|
||||||
|
# # the host to known hosts is not intermingled with multiprocess output.
|
||||||
|
        # fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        # fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)

        # create process
        (p, stdin) = self._run(ssh_cmd, in_data)

        self._send_password()

        no_prompt_out = ''
        no_prompt_err = ''

        # FIXME: su/sudo stuff
        #if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
        #   (self.runner.su and su and self.runner.su_pass):
        #    # several cases are handled for sudo privileges with password
        #    # * NOPASSWD (tty & no-tty): detect success_key on stdout
        #    # * without NOPASSWD:
        #    #   * detect prompt on stdout (tty)
        #    #   * detect prompt on stderr (no-tty)
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    sudo_output = ''
        #    sudo_errput = ''
        #
        #    while True:
        #        if success_key in sudo_output or \
        #            (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
        #            (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
        #            break
        #
        #        rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
        #                                      [p.stdout], self.runner.timeout)
        #        if p.stderr in rfd:
        #            chunk = p.stderr.read()
        #            if not chunk:
        #                raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
        #            sudo_errput += chunk
        #            incorrect_password = gettext.dgettext(
        #                "sudo", "Sorry, try again.")
        #            if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
        #                raise AnsibleError('Incorrect sudo password')
        #            elif sudo_errput.endswith(prompt):
        #                stdin.write(self.runner.sudo_pass + '\n')
        #
        #        if p.stdout in rfd:
        #            chunk = p.stdout.read()
        #            if not chunk:
        #                raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
        #            sudo_output += chunk
        #
        #        if not rfd:
        #            # timeout. wrap up process communication
        #            stdout = p.communicate()
        #            raise AnsibleError('ssh connection error waiting for sudo or su password prompt')
        #
        #    if success_key not in sudo_output:
        #        if sudoable:
        #            stdin.write(self.runner.sudo_pass + '\n')
        #        elif su:
        #            stdin.write(self.runner.su_pass + '\n')
        #    else:
        #        no_prompt_out += sudo_output
        #        no_prompt_err += sudo_errput

        #(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
        (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt)

        #if C.HOST_KEY_CHECKING and not_in_host_file:
        #    # lock around the initial SSH connectivity so the user prompt about whether to add
        #    # the host to known hosts is not intermingled with multiprocess output.
        #    fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
        #    fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr

        if C.HOST_KEY_CHECKING:
            if ssh_cmd[0] == "sshpass" and p.returncode == 6:
                raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.')

        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')

        # FIXME: module name isn't in runner
        #if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
        if p.returncode == 255 and in_data:
            raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')

        return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._host)
        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        cmd = self._password_cmd()

        # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
        host = self._host.ipv4_address
        #if self._ipv6:
        #    host = '[%s]' % host

        if C.DEFAULT_SCP_IF_SSH:
            cmd += ["scp"] + self._common_args
            cmd += [in_path, host + ":" + pipes.quote(out_path)]
            indata = None
        else:
            cmd += ["sftp"] + self._common_args + [host]
            indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))

        (p, stdin) = self._run(cmd, indata)

        self._send_password()

        (returncode, stdout, stderr) = self._communicate(p, stdin, indata)

        if returncode != 0:
            raise AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from remote to local '''
        self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._host)
        cmd = self._password_cmd()

        # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
        host = self._host.ipv4_address
        #if self._ipv6:
        #    host = '[%s]' % self._host

        if C.DEFAULT_SCP_IF_SSH:
            cmd += ["scp"] + self._common_args
            cmd += [host + ":" + in_path, out_path]
            indata = None
        else:
            cmd += ["sftp"] + self._common_args + [host]
            indata = "get %s %s\n" % (in_path, out_path)

        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self._send_password()
        stdout, stderr = p.communicate(indata)

        if p.returncode != 0:
            raise AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))

    def close(self):
        ''' not applicable since we're executing openssh binaries '''
        pass
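As a side note, the sftp branch above drives the whole transfer through a one-line batch script fed to sftp on stdin. A minimal standalone sketch of that assembly (the host and paths are made-up values, and the real code also prepends the sshpass/common arguments):

import pipes

host = "192.0.2.10"                                  # hypothetical target
in_path, out_path = "/tmp/src.txt", "/tmp/dest file.txt"

# Mirrors the put_file() else-branch: sftp reads "put <local> <remote>"
# from stdin; pipes.quote() protects paths containing spaces.
cmd = ["sftp", host]
indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
print(cmd, indata)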
v2/ansible/plugins/connections/winrm.py (new file, 258 lines)

@@ -0,0 +1,258 @@
# (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import

import base64
import hashlib
import imp
import os
import re
import shlex
import traceback
import urlparse
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv, vvvv, verbose
from ansible.runner.shell_plugins import powershell

try:
    from winrm import Response
    from winrm.exceptions import WinRMTransportError
    from winrm.protocol import Protocol
except ImportError:
    raise errors.AnsibleError("winrm is not installed")

_winrm_cache = {
    # 'user:pwhash@host:port': <protocol instance>
}

def vvvvv(msg, host=None):
    verbose(msg, host=host, caplevel=4)

class Connection(object):
    '''WinRM connections over HTTP/HTTPS.'''

    def __init__(self, runner, host, port, user, password, *args, **kwargs):
        self.runner = runner
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.has_pipelining = False
        self.default_shell = 'powershell'
        self.default_suffixes = ['.ps1', '']
        self.protocol = None
        self.shell_id = None
        self.delegate = None

    def _winrm_connect(self):
        '''
        Establish a WinRM connection over HTTP/HTTPS.
        '''
        port = self.port or 5986
        vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
            (self.user, port, self.host), host=self.host)
        netloc = '%s:%d' % (self.host, port)
        cache_key = '%s:%s@%s:%d' % (self.user, hashlib.md5(self.password).hexdigest(), self.host, port)
        if cache_key in _winrm_cache:
            vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host)
            return _winrm_cache[cache_key]
        transport_schemes = [('plaintext', 'https'), ('plaintext', 'http')] # FIXME: ssl/kerberos
        if port == 5985:
            transport_schemes = reversed(transport_schemes)
        exc = None
        for transport, scheme in transport_schemes:
            endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
            vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
                 host=self.host)
            protocol = Protocol(endpoint, transport=transport,
                                username=self.user, password=self.password)
            try:
                protocol.send_message('')
                _winrm_cache[cache_key] = protocol
                return protocol
            except WinRMTransportError, exc:
                err_msg = str(exc)
                if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
                    raise errors.AnsibleError("the connection attempt timed out")
                m = re.search(r'Code\s+?(\d{3})', err_msg)
                if m:
                    code = int(m.groups()[0])
                    if code == 401:
                        raise errors.AnsibleError("the username/password specified for this server was incorrect")
                    elif code == 411:
                        _winrm_cache[cache_key] = protocol
                        return protocol
                vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
                continue
        if exc:
            raise errors.AnsibleError(str(exc))

    def _winrm_exec(self, command, args=(), from_exec=False):
        if from_exec:
            vvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
        else:
            vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
        if not self.protocol:
            self.protocol = self._winrm_connect()
        if not self.shell_id:
            self.shell_id = self.protocol.open_shell()
        command_id = None
        try:
            command_id = self.protocol.run_command(self.shell_id, command, args)
            response = Response(self.protocol.get_command_output(self.shell_id, command_id))
            if from_exec:
                vvvv('WINRM RESULT %r' % response, host=self.host)
            else:
                vvvvv('WINRM RESULT %r' % response, host=self.host)
            vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host)
            vvvvv('WINRM STDERR %s' % response.std_err, host=self.host)
            return response
        finally:
            if command_id:
                self.protocol.cleanup_command(self.shell_id, command_id)

    def connect(self):
        if not self.protocol:
            self.protocol = self._winrm_connect()
        return self

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None):
        cmd = cmd.encode('utf-8')
        cmd_parts = shlex.split(cmd, posix=False)
        if '-EncodedCommand' in cmd_parts:
            encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
            decoded_cmd = base64.b64decode(encoded_cmd)
            vvv("EXEC %s" % decoded_cmd, host=self.host)
        else:
            vvv("EXEC %s" % cmd, host=self.host)
        # For script/raw support.
        if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
            script = powershell._build_file_cmd(cmd_parts)
            cmd_parts = powershell._encode_script(script, as_list=True)
        try:
            result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
        except Exception, e:
            traceback.print_exc()
            raise errors.AnsibleError("failed to exec cmd %s" % cmd)
        return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))

    def put_file(self, in_path, out_path):
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        with open(in_path) as in_file:
            in_size = os.path.getsize(in_path)
            script_template = '''
                $s = [System.IO.File]::OpenWrite("%s");
                [void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
                $b = [System.Convert]::FromBase64String("%s");
                [void]$s.Write($b, 0, $b.length);
                [void]$s.SetLength(%d);
                [void]$s.Close();
            '''
            # Determine max size of data we can pass per command.
            script = script_template % (powershell._escape(out_path), in_size, '', in_size)
            cmd = powershell._encode_script(script)
            # Encode script with no data, subtract its length from 8190 (max
            # windows command length), divide by 2.67 (UTF16LE base64 command
            # encoding), then by 1.35 again (data base64 encoding).
            buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
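            # Worked example of the arithmetic above (illustrative numbers,
            # not measured): with an empty-payload encoded command of ~2000
            # chars, buffer_size = int(((8190 - 2000) / 2.67) / 1.35) == 1717
            # raw file bytes per round trip.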
            for offset in xrange(0, in_size, buffer_size):
                try:
                    out_data = in_file.read(buffer_size)
                    if offset == 0:
                        if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
                            out_path = out_path + '.ps1'
                    b64_data = base64.b64encode(out_data)
                    script = script_template % (powershell._escape(out_path), offset, b64_data, in_size)
                    vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host)
                    cmd_parts = powershell._encode_script(script, as_list=True)
                    result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
                    if result.status_code != 0:
                        raise IOError(result.std_err.encode('utf-8'))
                except Exception:
                    traceback.print_exc()
                    raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def fetch_file(self, in_path, out_path):
        out_path = out_path.replace('\\', '/')
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        buffer_size = 2**20 # 1MB chunks
        if not os.path.exists(os.path.dirname(out_path)):
            os.makedirs(os.path.dirname(out_path))
        out_file = None
        try:
            offset = 0
            while True:
                try:
                    script = '''
                        If (Test-Path -PathType Leaf "%(path)s")
                        {
                            $stream = [System.IO.File]::OpenRead("%(path)s");
                            $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
                            $buffer = New-Object Byte[] %(buffer_size)d;
                            $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
                            $bytes = $buffer[0..($bytesRead-1)];
                            [System.Convert]::ToBase64String($bytes);
                            $stream.Close() | Out-Null;
                        }
                        ElseIf (Test-Path -PathType Container "%(path)s")
                        {
                            Write-Host "[DIR]";
                        }
                        Else
                        {
                            Write-Error "%(path)s does not exist";
                            Exit 1;
                        }
                    ''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset)
                    vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host)
                    cmd_parts = powershell._encode_script(script, as_list=True)
                    result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
                    if result.status_code != 0:
                        raise IOError(result.std_err.encode('utf-8'))
                    if result.std_out.strip() == '[DIR]':
                        data = None
                    else:
                        data = base64.b64decode(result.std_out.strip())
                    if data is None:
                        if not os.path.exists(out_path):
                            os.makedirs(out_path)
                        break
                    else:
                        if not out_file:
                            # If out_path is a directory and we're expecting a file, bail out now.
                            if os.path.isdir(out_path):
                                break
                            out_file = open(out_path, 'wb')
                        out_file.write(data)
                        if len(data) < buffer_size:
                            break
                        offset += len(data)
                except Exception:
                    traceback.print_exc()
                    raise errors.AnsibleError("failed to transfer file to %s" % out_path)
        finally:
            if out_file:
                out_file.close()

    def close(self):
        if self.protocol and self.shell_id:
            self.protocol.close_shell(self.shell_id)
            self.shell_id = None
v2/ansible/plugins/filter/core.py (new file, 323 lines)

@@ -0,0 +1,323 @@
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import base64
import json
import os.path
import yaml
import types
import pipes
import glob
import re
import collections
import operator as py_operator
from distutils.version import LooseVersion, StrictVersion
from random import SystemRandom, shuffle
from jinja2.filters import environmentfilter

from ansible.errors import *
from ansible.utils.hashing import md5s, checksum_s

def to_nice_yaml(*a, **kw):
    '''Make verbose, human readable yaml'''
    return yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw)

def to_json(a, *args, **kw):
    ''' Convert the value to JSON '''
    return json.dumps(a, *args, **kw)

def to_nice_json(a, *args, **kw):
    '''Make verbose, human readable JSON'''
    return json.dumps(a, indent=4, sort_keys=True, *args, **kw)

def failed(*a, **kw):
    ''' Test if task result yields failed '''
    item = a[0]
    if type(item) != dict:
        raise errors.AnsibleFilterError("|failed expects a dictionary")
    rc = item.get('rc',0)
    failed = item.get('failed',False)
    if rc != 0 or failed:
        return True
    else:
        return False

def success(*a, **kw):
    ''' Test if task result yields success '''
    return not failed(*a, **kw)

def changed(*a, **kw):
    ''' Test if task result yields changed '''
    item = a[0]
    if type(item) != dict:
        raise errors.AnsibleFilterError("|changed expects a dictionary")
    if not 'changed' in item:
        changed = False
        if ('results' in item    # some modules return a 'results' key
                and type(item['results']) == list
                and type(item['results'][0]) == dict):
            for result in item['results']:
                changed = changed or result.get('changed', False)
    else:
        changed = item.get('changed', False)
    return changed

def skipped(*a, **kw):
    ''' Test if task result yields skipped '''
    item = a[0]
    if type(item) != dict:
        raise errors.AnsibleFilterError("|skipped expects a dictionary")
    skipped = item.get('skipped', False)
    return skipped

def mandatory(a):
    ''' Make a variable mandatory '''
    try:
        a
    except NameError:
        raise errors.AnsibleFilterError('Mandatory variable not defined.')
    else:
        return a

def bool(a):
    ''' return a bool for the arg '''
    if a is None or type(a) == bool:
        return a
    if type(a) in types.StringTypes:
        a = a.lower()
    if a in ['yes', 'on', '1', 'true', 1]:
        return True
    else:
        return False

def quote(a):
    ''' return its argument quoted for shell usage '''
    return pipes.quote(a)

def fileglob(pathname):
    ''' return list of matched files for glob '''
    return glob.glob(pathname)

def regex(value='', pattern='', ignorecase=False, match_type='search'):
    ''' Expose `re` as a boolean filter using the `search` method by default.
        This is likely only useful for `search` and `match` which already
        have their own filters.
    '''
    if ignorecase:
        flags = re.I
    else:
        flags = 0
    _re = re.compile(pattern, flags=flags)
    _bool = __builtins__.get('bool')
    return _bool(getattr(_re, match_type, 'search')(value))

def match(value, pattern='', ignorecase=False):
    ''' Perform a `re.match` returning a boolean '''
    return regex(value, pattern, ignorecase, 'match')

def search(value, pattern='', ignorecase=False):
    ''' Perform a `re.search` returning a boolean '''
    return regex(value, pattern, ignorecase, 'search')

def regex_replace(value='', pattern='', replacement='', ignorecase=False):
    ''' Perform a `re.sub` returning a string '''

    if not isinstance(value, basestring):
        value = str(value)

    if ignorecase:
        flags = re.I
    else:
        flags = 0
    _re = re.compile(pattern, flags=flags)
    return _re.sub(replacement, value)

def unique(a):
    if isinstance(a,collections.Hashable):
        c = set(a)
    else:
        c = []
        for x in a:
            if x not in c:
                c.append(x)
    return c

def intersect(a, b):
    if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
        c = set(a) & set(b)
    else:
        c = unique(filter(lambda x: x in b, a))
    return c

def difference(a, b):
    if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
        c = set(a) - set(b)
    else:
        c = unique(filter(lambda x: x not in b, a))
    return c

def symmetric_difference(a, b):
    if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
        c = set(a) ^ set(b)
    else:
        c = unique(filter(lambda x: x not in intersect(a,b), union(a,b)))
    return c

def union(a, b):
    if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
        c = set(a) | set(b)
    else:
        c = unique(a + b)
    return c

def min(a):
    _min = __builtins__.get('min')
    return _min(a)

def max(a):
    _max = __builtins__.get('max')
    return _max(a)

def version_compare(value, version, operator='eq', strict=False):
    ''' Perform a version comparison on a value '''
    op_map = {
        '==': 'eq', '=':  'eq', 'eq': 'eq',
        '<':  'lt', 'lt': 'lt',
        '<=': 'le', 'le': 'le',
        '>':  'gt', 'gt': 'gt',
        '>=': 'ge', 'ge': 'ge',
        '!=': 'ne', '<>': 'ne', 'ne': 'ne'
    }

    if strict:
        Version = StrictVersion
    else:
        Version = LooseVersion

    if operator in op_map:
        operator = op_map[operator]
    else:
        raise errors.AnsibleFilterError('Invalid operator type')

    try:
        method = getattr(py_operator, operator)
        return method(Version(str(value)), Version(str(version)))
    except Exception, e:
        raise errors.AnsibleFilterError('Version comparison: %s' % e)

@environmentfilter
def rand(environment, end, start=None, step=None):
    r = SystemRandom()
    if isinstance(end, (int, long)):
        if not start:
            start = 0
        if not step:
            step = 1
        return r.randrange(start, end, step)
    elif hasattr(end, '__iter__'):
        if start or step:
            raise errors.AnsibleFilterError('start and step can only be used with integer values')
        return r.choice(end)
    else:
        raise errors.AnsibleFilterError('random can only be used on sequences and integers')

def randomize_list(mylist):
    try:
        mylist = list(mylist)
        shuffle(mylist)
    except:
        pass
    return mylist

class FilterModule(object):
    ''' Ansible core jinja2 filters '''

    def filters(self):
        return {
            # base 64
            'b64decode': base64.b64decode,
            'b64encode': base64.b64encode,

            # json
            'to_json': to_json,
            'to_nice_json': to_nice_json,
            'from_json': json.loads,

            # yaml
            'to_yaml': yaml.safe_dump,
            'to_nice_yaml': to_nice_yaml,
            'from_yaml': yaml.safe_load,

            # path
            'basename': os.path.basename,
            'dirname': os.path.dirname,
            'expanduser': os.path.expanduser,
            'realpath': os.path.realpath,
            'relpath': os.path.relpath,

            # failure testing
            'failed'  : failed,
            'success' : success,

            # changed testing
            'changed' : changed,

            # skip testing
            'skipped' : skipped,

            # variable existence
            'mandatory': mandatory,

            # value as boolean
            'bool': bool,

            # quote string for shell usage
            'quote': quote,

            # hash filters
            # md5 hex digest of string
            'md5': md5s,
            # sha1 hex digest of string
            'sha1': checksum_s,
            # checksum of string as used by ansible for checksumming files
            'checksum': checksum_s,

            # file glob
            'fileglob': fileglob,

            # regex
            'match': match,
            'search': search,
            'regex': regex,
            'regex_replace': regex_replace,

            # list
            'unique' : unique,
            'intersect': intersect,
            'difference': difference,
            'symmetric_difference': symmetric_difference,
            'union': union,
            'min' : min,
            'max' : max,

            # version comparison
            'version_compare': version_compare,

            # random stuff
            'random': rand,
            'shuffle': randomize_list,
        }
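As a quick illustration of what the version_compare filter above reduces to, here is a self-contained equivalent of `{{ '2.7.9' | version_compare('2.7', '>=') }}` (the version strings are made-up examples):

import operator as py_operator
from distutils.version import LooseVersion

# op_map resolves '>=' to the operator-module name 'ge'.
print(py_operator.ge(LooseVersion('2.7.9'), LooseVersion('2.7')))    # True
print(py_operator.lt(LooseVersion('1.0'), LooseVersion('1.0.1')))    # True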
@@ -51,3 +51,10 @@ class InventoryIniParser(InventoryAggregateParser):
     def parse(self):
         return super(InventoryDirectoryParser, self).parse()
 
+    def _before_comment(self, msg):
+        ''' what's the part of a string before a comment? '''
+        msg = msg.replace("\#","**NOT_A_COMMENT**")
+        msg = msg.split("#")[0]
+        msg = msg.replace("**NOT_A_COMMENT**","#")
+        return msg
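The placeholder dance in _before_comment is easiest to see on a concrete input (a hypothetical inventory line containing an escaped hash):

def _before_comment(msg):
    # Hide escaped hashes, split on the first real comment marker,
    # then restore the escaped hashes.
    msg = msg.replace("\\#", "**NOT_A_COMMENT**")
    msg = msg.split("#")[0]
    msg = msg.replace("**NOT_A_COMMENT**", "#")
    return msg

print(_before_comment("host pass=p\\#ss  # trailing comment"))
# -> 'host pass=p#ss  '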
@@ -1,82 +0,0 @@
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible import utils, errors
import os
import codecs
import csv

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def read_csv(self, filename, key, delimiter, dflt=None, col=1):

        try:
            f = codecs.open(filename, 'r', encoding='utf-8')
            creader = csv.reader(f, delimiter=delimiter)

            for row in creader:
                if row[0] == key:
                    return row[int(col)]
        except Exception, e:
            raise errors.AnsibleError("csvfile: %s" % str(e))

        return dflt

    def run(self, terms, inject=None, **kwargs):

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if isinstance(terms, basestring):
            terms = [ terms ]

        ret = []
        for term in terms:
            params = term.split()
            key = params[0]

            paramvals = {
                'file' : 'ansible.csv',
                'default' : None,
                'delimiter' : "TAB",
                'col' : "1",  # column to return
            }

            # parameters specified?
            try:
                for param in params[1:]:
                    name, value = param.split('=')
                    assert(name in paramvals)
                    paramvals[name] = value
            except (ValueError, AssertionError), e:
                raise errors.AnsibleError(e)

            if paramvals['delimiter'] == 'TAB':
                paramvals['delimiter'] = "\t"

            path = utils.path_dwim(self.basedir, paramvals['file'])

            var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
            if var is not None:
                if type(var) is list:
                    for v in var:
                        ret.append(v)
                else:
                    ret.append(var)
        return ret
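For reference, this is how a lookup term such as `fruit file=people.csv delimiter=, col=2` is split into the key and parameter overrides in the run() loop above (the filename and values here are made up):

term = "fruit file=people.csv delimiter=, col=2"
params = term.split()
key = params[0]
paramvals = {'file': 'ansible.csv', 'default': None, 'delimiter': 'TAB', 'col': '1'}
for param in params[1:]:
    name, value = param.split('=')
    assert name in paramvals          # unknown names raise, as in the plugin
    paramvals[name] = value
print(key, paramvals)
# fruit {'file': 'people.csv', 'default': None, 'delimiter': ',', 'col': '2'}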
@@ -1,39 +0,0 @@
# (c) 2014, Kent R. Spillner <kspillner@acm.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors

def flatten_hash_to_list(terms):
    ret = []
    for key in terms:
        ret.append({'key': key, 'value': terms[key]})
    return ret

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, terms, inject=None, **kwargs):
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if not isinstance(terms, dict):
            raise errors.AnsibleError("with_dict expects a dict")

        return flatten_hash_to_list(terms)
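flatten_hash_to_list is the whole of with_dict's behavior; on sample data (fabricated here) it produces one key/value item per mapping entry:

terms = {'alice': {'uid': 1001}, 'bob': {'uid': 1002}}
print([{'key': k, 'value': terms[k]} for k in terms])
# [{'key': 'alice', 'value': {'uid': 1001}},
#  {'key': 'bob', 'value': {'uid': 1002}}]   (ordering may vary)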
@@ -1,68 +0,0 @@
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible import utils, errors
import os
HAVE_DNS=False
try:
    import dns.resolver
    from dns.exception import DNSException
    HAVE_DNS=True
except ImportError:
    pass

# ==============================================================
# DNSTXT: DNS TXT records
#
#       key=domainname
# TODO: configurable resolver IPs
# --------------------------------------------------------------

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

        if HAVE_DNS == False:
            raise errors.AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")

    def run(self, terms, inject=None, **kwargs):

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if isinstance(terms, basestring):
            terms = [ terms ]

        ret = []
        for term in terms:
            domain = term.split()[0]
            string = []
            try:
                answers = dns.resolver.query(domain, 'TXT')
                for rdata in answers:
                    s = rdata.to_text()
                    string.append(s[1:-1])  # Strip outside quotes on TXT rdata

            except dns.resolver.NXDOMAIN:
                string = 'NXDOMAIN'
            except dns.resolver.Timeout:
                string = ''
            except dns.exception.DNSException, e:
                raise errors.AnsibleError("dns.resolver unhandled exception", e)

            ret.append(''.join(string))
        return ret
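The s[1:-1] slice above exists because TXT rdata comes back with the surrounding quotes intact; on a fabricated record:

s = '"v=spf1 -all"'     # sample rdata.to_text() output (made-up record)
print(s[1:-1])          # v=spf1 -all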
@@ -1,78 +0,0 @@
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible import utils
import os
import urllib2
try:
    import json
except ImportError:
    import simplejson as json

# this can be made configurable, but should not use ansible.cfg
ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
if os.getenv('ANSIBLE_ETCD_URL') is not None:
    ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL']

class etcd():
    def __init__(self, url=ANSIBLE_ETCD_URL):
        self.url = url
        self.baseurl = '%s/v1/keys' % (self.url)

    def get(self, key):
        url = "%s/%s" % (self.baseurl, key)

        data = None
        value = ""
        try:
            r = urllib2.urlopen(url)
            data = r.read()
        except:
            return value

        try:
            # {"action":"get","key":"/name","value":"Jane Jolie","index":5}
            item = json.loads(data)
            if 'value' in item:
                value = item['value']
            if 'errorCode' in item:
                value = "ENOENT"
        except:
            raise
            pass

        return value

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir
        self.etcd = etcd()

    def run(self, terms, inject=None, **kwargs):

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if isinstance(terms, basestring):
            terms = [ terms ]

        ret = []
        for term in terms:
            key = term.split()[0]
            value = self.etcd.get(key)
            ret.append(value)
        return ret
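Mirroring the sample response in the comment above, the value extraction in etcd.get() reduces to the following (the JSON payload is the fabricated example from the comment):

import json

data = '{"action":"get","key":"/name","value":"Jane Jolie","index":5}'
item = json.loads(data)
value = item.get('value', '')
if 'errorCode' in item:       # etcd signals a missing key via errorCode
    value = 'ENOENT'
print(value)                  # Jane Jolie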
@@ -1,59 +0,0 @@
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible import utils, errors
import os
import codecs

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, terms, inject=None, **kwargs):

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        ret = []

        # this can happen if the variable contains a string, strictly not desired for lookup
        # plugins, but users may try it, so make it work.
        if not isinstance(terms, list):
            terms = [ terms ]

        for term in terms:
            basedir_path  = utils.path_dwim(self.basedir, term)
            relative_path = None
            playbook_path = None

            # Special handling of the file lookup, used primarily when the
            # lookup is done from a role. If the file isn't found in the
            # basedir of the current file, use dwim_relative to look in the
            # role/files/ directory, and finally the playbook directory
            # itself (which will be relative to the current working dir)
            if '_original_file' in inject:
                relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', term, self.basedir, check=False)
            if 'playbook_dir' in inject:
                playbook_path = os.path.join(inject['playbook_dir'], term)

            for path in (basedir_path, relative_path, playbook_path):
                if path and os.path.exists(path):
                    ret.append(codecs.open(path, encoding="utf8").read().rstrip())
                    break
            else:
                raise errors.AnsibleError("could not locate file in lookup: %s" % term)

        return ret
@@ -1,194 +0,0 @@
# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.


# take a list of files and (optionally) a list of paths
# return the first existing file found in the paths
# [file1, file2, file3], [path1, path2, path3]
# search order is:
# path1/file1
# path1/file2
# path1/file3
# path2/file1
# path2/file2
# path2/file3
# path3/file1
# path3/file2
# path3/file3

# first file found with os.path.exists() is returned
# no file matches raises ansibleerror
# EXAMPLES
#  - name: copy first existing file found to /some/file
#    action: copy src=$item dest=/some/file
#    with_first_found:
#     - files: foo ${inventory_hostname} bar
#       paths: /tmp/production /tmp/staging

# that will look for files in this order:
# /tmp/production/foo
#                 ${inventory_hostname}
#                 bar
# /tmp/staging/foo
#              ${inventory_hostname}
#              bar

#  - name: copy first existing file found to /some/file
#    action: copy src=$item dest=/some/file
#    with_first_found:
#     - files: /some/place/foo ${inventory_hostname} /some/place/else

#  that will look for files in this order:
#  /some/place/foo
#  $relative_path/${inventory_hostname}
#  /some/place/else

# example - including tasks:
#  tasks:
#  - include: $item
#    with_first_found:
#     - files: generic
#       paths: tasks/staging tasks/production
# this will include the tasks in the file generic where it is found first (staging or production)

# example simple file lists
#tasks:
#- name: first found file
#  action: copy src=$item dest=/etc/file.cfg
#  with_first_found:
#  - files: foo.${inventory_hostname} foo


# example skipping if no matched files
# First_found also offers the ability to control whether or not failing
# to find a file returns an error or not
#
#- name: first found file - or skip
#  action: copy src=$item dest=/etc/file.cfg
#  with_first_found:
#  - files: foo.${inventory_hostname}
#    skip: true

# example a role with default configuration and configuration per host
# you can set multiple terms with their own files and paths to look through.
# consider a role that sets some configuration per host falling back on a default config.
#
#- name: some configuration template
#  template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
#  with_first_found:
#   - files:
#      - ${inventory_hostname}/etc/file.cfg
#     paths:
#      - ../../../templates.overwrites
#      - ../../../templates
#   - files:
#      - etc/file.cfg
#     paths:
#      - templates

# the above will return an empty list if the files cannot be found at all
# if skip is unspecified or if it is set to false then it will return a list
# error which can be caught by ignore_errors: true for that action.

# finally - if you want you can use it, in place, to replace first_available_file:
# you simply cannot use the - files, path or skip options. simply replace
# first_available_file with with_first_found and leave the file listing in place
#
#
#  - name: with_first_found like first_available_file
#    action: copy src=$item dest=/tmp/faftest
#    with_first_found:
#     - ../files/foo
#     - ../files/bar
#     - ../files/baz
#    ignore_errors: true


from ansible import utils, errors
import os

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, terms, inject=None, **kwargs):

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        result = None
        anydict = False
        skip = False

        for term in terms:
            if isinstance(term, dict):
                anydict = True

        total_search = []
        if anydict:
            for term in terms:
                if isinstance(term, dict):
                    files = term.get('files', [])
                    paths = term.get('paths', [])
                    skip  = utils.boolean(term.get('skip', False))

                    filelist = files
                    if isinstance(files, basestring):
                        files = files.replace(',', ' ')
                        files = files.replace(';', ' ')
                        filelist = files.split(' ')

                    pathlist = paths
                    if paths:
                        if isinstance(paths, basestring):
                            paths = paths.replace(',', ' ')
                            paths = paths.replace(':', ' ')
                            paths = paths.replace(';', ' ')
                            pathlist = paths.split(' ')

                    if not pathlist:
                        total_search = filelist
                    else:
                        for path in pathlist:
                            for fn in filelist:
                                f = os.path.join(path, fn)
                                total_search.append(f)
                else:
                    total_search.append(term)
        else:
            total_search = terms

        for fn in total_search:
            if inject and '_original_file' in inject:
                # check the templates and vars directories too,
                # if they exist
                for roledir in ('templates', 'vars'):
                    path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn)
                    if os.path.exists(path):
                        return [path]
            # if none of the above were found, just check the
            # current filename against the basedir (this will already
            # have ../files from runner, if it's a role task
            path = utils.path_dwim(self.basedir, fn)
            if os.path.exists(path):
                return [path]
        else:
            if skip:
                return []
            else:
                return [None]
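The search order described at the top of the file is just a path-major cross product, which is what the total_search construction builds; a compact sketch on sample lists:

import os

files = ['foo', 'bar']
paths = ['/tmp/production', '/tmp/staging']
# Paths outermost, files innermost, matching the documented order.
total_search = [os.path.join(p, f) for p in paths for f in files]
print(total_search)
# ['/tmp/production/foo', '/tmp/production/bar',
#  '/tmp/staging/foo', '/tmp/staging/bar']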
@@ -1,78 +0,0 @@
# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import ansible.utils as utils
import ansible.errors as errors


def check_list_of_one_list(term):
    # make sure term is not a list of one (list of one..) item
    # return the final non list item if so

    if isinstance(term,list) and len(term) == 1:
        term = term[0]
        if isinstance(term,list):
            term = check_list_of_one_list(term)

    return term


class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def flatten(self, terms, inject):

        ret = []
        for term in terms:
            term = check_list_of_one_list(term)

            if term == 'None' or term == 'null':
                # ignore undefined items
                break

            if isinstance(term, basestring):
                # convert a variable to a list
                term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject)
                # but avoid converting a plain string to a list of one string
                if term2 != [ term ]:
                    term = term2

            if isinstance(term, list):
                # if it's a list, check recursively for items that are a list
                term = self.flatten(term, inject)
                ret.extend(term)
            else:
                ret.append(term)

        return ret

    def run(self, terms, inject=None, **kwargs):

        # see if the string represents a list and convert to list if so
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if not isinstance(terms, list):
            raise errors.AnsibleError("with_flattened expects a list")

        ret = self.flatten(terms, inject)
        return ret
@@ -1,48 +0,0 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Steven Dossett <sdossett@panath.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
import ansible.inventory as inventory

def flatten(terms):
    ret = []
    for term in terms:
        if isinstance(term, list):
            ret.extend(term)
        else:
            ret.append(term)
    return ret

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir
        if 'runner' in kwargs:
            self.host_list = kwargs['runner'].inventory.host_list
        else:
            raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")

    def run(self, terms, inject=None, **kwargs):
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if not isinstance(terms, list):
            raise errors.AnsibleError("with_inventory_hostnames expects a list")
        return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
@@ -15,9 +15,9 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
 
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
+#from ansible.utils import safe_eval
+#import ansible.utils as utils
+#import ansible.errors as errors
 
 def flatten(terms):
     ret = []
@@ -34,10 +34,10 @@ class LookupModule(object):
         self.basedir = basedir
 
     def run(self, terms, inject=None, **kwargs):
-        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
-        if not isinstance(terms, list) and not isinstance(terms,set):
-            raise errors.AnsibleError("with_items expects a list or a set")
+        # FIXME: this function needs to be ported still, or something like it
+        # where really the intention is just to template a bare variable
+        # with the result being a list of terms
+        #terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
 
         return flatten(terms)
@@ -1,38 +0,0 @@
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import subprocess
from ansible import utils, errors

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def run(self, terms, inject=None, **kwargs):

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        ret = []
        for term in terms:
            p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (stdout, stderr) = p.communicate()
            if p.returncode == 0:
                ret.extend(stdout.splitlines())
            else:
                raise errors.AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
        return ret
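Functionally, each with_lines term is run through the shell and its output split on newlines; a self-contained equivalent (the printf command is purely illustrative):

import subprocess

p = subprocess.Popen('printf "a\\nb\\n"', shell=True, stdout=subprocess.PIPE)
stdout, _ = p.communicate()
print(stdout.splitlines())   # ['a', 'b'] (byte strings on Python 3)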
@@ -1,73 +0,0 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import ansible.utils as utils
from ansible.utils import safe_eval
import ansible.errors as errors

def flatten(terms):
    ret = []
    for term in terms:
        if isinstance(term, list):
            ret.extend(term)
        elif isinstance(term, tuple):
            ret.extend(term)
        else:
            ret.append(term)
    return ret

def combine(a,b):
    results = []
    for x in a:
        for y in b:
            results.append(flatten([x,y]))
    return results

class LookupModule(object):

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir

    def __lookup_injects(self, terms, inject):
        results = []
        for x in terms:
            intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
            results.append(intermediate)
        return results

    def run(self, terms, inject=None, **kwargs):

        # this code is common with 'items.py' consider moving to utils if we need it again

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        terms = self.__lookup_injects(terms, inject)

        my_list = terms[:]
        my_list.reverse()
        result = []
        if len(my_list) == 0:
            raise errors.AnsibleError("with_nested requires at least one element in the nested list")
        result = my_list.pop()
        while len(my_list) > 0:
            result2 = combine(result, my_list.pop())
            result = result2
        new_result = []
        for x in result:
            new_result.append(flatten(x))
        return new_result
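combine() above is a single cartesian step; iterating it in the while loop gives with_nested its full product. A compact, self-contained sketch on sample lists:

def flatten(terms):
    ret = []
    for term in terms:
        if isinstance(term, (list, tuple)):
            ret.extend(term)
        else:
            ret.append(term)
    return ret

def combine(a, b):
    # One cartesian step: pair every x with every y, flattening as it goes.
    return [flatten([x, y]) for x in a for y in b]

print(combine(['a', 'b'], [1, 2]))
# [['a', 1], ['a', 2], ['b', 1], ['b', 2]]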