2012-02-29 01:08:09 +01:00
|
|
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
2012-02-24 05:28:58 +01:00
|
|
|
#
|
2012-02-29 01:08:09 +01:00
|
|
|
# This file is part of Ansible
|
|
|
|
#
|
|
|
|
# Ansible is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# Ansible is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
2012-02-24 05:28:58 +01:00
|
|
|
#
|
|
|
|
|
2012-03-03 04:03:03 +01:00
|
|
|
################################################
|
|
|
|
|
2012-02-24 05:28:58 +01:00
|
|
|
import multiprocessing
|
2012-02-27 06:43:02 +01:00
|
|
|
import signal
|
2012-02-24 05:28:58 +01:00
|
|
|
import os
|
2012-04-11 01:27:19 +02:00
|
|
|
import pwd
|
2012-02-28 06:45:37 +01:00
|
|
|
import Queue
|
2012-03-03 18:25:56 +01:00
|
|
|
import random
|
2012-03-14 01:59:05 +01:00
|
|
|
import traceback
|
2012-03-14 05:34:00 +01:00
|
|
|
import tempfile
|
2012-04-26 20:34:49 +02:00
|
|
|
import time
|
2012-04-19 04:43:17 +02:00
|
|
|
import base64
|
2012-04-24 03:06:47 +02:00
|
|
|
import getpass
|
2012-05-09 01:03:51 +02:00
|
|
|
import codecs
|
2012-03-13 04:11:54 +01:00
|
|
|
|
2012-03-18 22:53:58 +01:00
|
|
|
import ansible.constants as C
|
|
|
|
import ansible.connection
|
2012-04-13 14:39:54 +02:00
|
|
|
import ansible.inventory
|
2012-03-18 22:04:07 +01:00
|
|
|
from ansible import utils
|
2012-03-18 22:16:12 +01:00
|
|
|
from ansible import errors
|
2012-03-26 01:05:27 +02:00
|
|
|
from ansible import callbacks as ans_callbacks
|
2012-03-29 02:32:04 +02:00
|
|
|
|
|
|
|
# Detect whether PyCrypto's atfork() helper is available; _executor_hook
# uses it to reinitialize PRNG state inside forked worker processes.
try:
    from Crypto.Random import atfork
    HAS_ATFORK = True
except ImportError:
    HAS_ATFORK = False
|
2012-03-15 00:57:56 +01:00
|
|
|
|
2012-03-03 04:03:03 +01:00
|
|
|
################################################
|
2012-02-24 05:28:58 +01:00
|
|
|
|
2012-02-27 06:43:02 +01:00
|
|
|
def _executor_hook(job_queue, result_queue):
    ''' callback used by multiprocessing pool

    Pulls (runner, host) jobs off job_queue until it is drained and puts
    each runner._executor(host) result on result_queue.
    '''

    # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
    # does not occur for everyone, some claim still occurs on newer paramiko
    # this function not present in CentOS 6
    if HAS_ATFORK:
        atfork()

    # workers must not intercept SIGINT; the parent process handles it
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        try:
            job = job_queue.get(block=False)
            runner, host = job
            result_queue.put(runner._executor(host))
        except Queue.Empty:
            # queue drained between empty() and get(); nothing to do
            pass
        except Exception:
            # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; still report and keep processing jobs
            traceback.print_exc()
|
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
################################################
|
2012-02-24 05:28:58 +01:00
|
|
|
|
|
|
|
class Runner(object):
|
|
|
|
|
2012-04-17 03:52:15 +02:00
|
|
|
    def __init__(self,
        host_list=C.DEFAULT_HOST_LIST, module_path=C.DEFAULT_MODULE_PATH,
        module_name=C.DEFAULT_MODULE_NAME, module_args=C.DEFAULT_MODULE_ARGS,
        forks=C.DEFAULT_FORKS, timeout=C.DEFAULT_TIMEOUT,
        pattern=C.DEFAULT_PATTERN, remote_user=C.DEFAULT_REMOTE_USER,
        remote_pass=C.DEFAULT_REMOTE_PASS, remote_port=C.DEFAULT_REMOTE_PORT,
        sudo_pass=C.DEFAULT_SUDO_PASS, background=0, basedir=None,
        setup_cache=None, transport=C.DEFAULT_TRANSPORT, conditional='True',
        callbacks=None, debug=False, sudo=False, sudo_user=C.DEFAULT_SUDO_USER,
        module_vars=None, is_playbook=False, inventory=None):
        """
        host_list    : path to a host list file, like /etc/ansible/hosts
        module_path  : path to modules, like /usr/share/ansible
        module_name  : which module to run (string)
        module_args  : args to pass to the module (string)
        forks        : desired level of parallelism (hosts to run on at a time)
        timeout      : connection timeout, such as a SSH timeout, in seconds
        pattern      : pattern or groups to select from in inventory
        remote_user  : connect as this remote username
        remote_pass  : supply this password (if not using keys)
        remote_port  : use this default remote port (if not set by the inventory system)
        sudo_user    : If you want to sudo to a user other than root.
        sudo_pass    : sudo password if using sudo and sudo requires a password
        background   : run asynchronously with a cap of this many # of seconds (if not 0)
        basedir      : paths used by modules if not absolute are relative to here
        setup_cache  : this is a internalism that is going away
        transport    : transport mode (paramiko, local)
        conditional  : only execute if this string, evaluated, is True
        callbacks    : output callback class
        sudo         : log in as remote user and immediately sudo to root
        module_vars  : provides additional variables to a template.  FIXME: just use module_args, remove
        is_playbook  : indicates Runner is being used by a playbook.  affects behavior in various ways.
        inventory    : inventory object, if host_list is not provided
        """

        # mutable defaults are created per instance, not in the signature
        if setup_cache is None:
            setup_cache = {}
        if basedir is None:
            basedir = os.getcwd()

        if callbacks is None:
            callbacks = ans_callbacks.DefaultRunnerCallbacks()
        self.callbacks = callbacks

        # job id handed to the async wrapper if this run is backgrounded
        self.generated_jid = str(random.randint(0, 999999999999))

        self.sudo_user = sudo_user
        self.transport = transport
        self.connector = ansible.connection.Connection(self, self.transport, self.sudo_user)

        # build an inventory from host_list unless one was supplied directly
        if inventory is None:
            self.inventory = ansible.inventory.Inventory(host_list)
        else:
            self.inventory = inventory

        if module_vars is None:
            module_vars = {}

        self.setup_cache = setup_cache
        self.conditional = conditional
        self.module_path = module_path
        self.module_name = module_name
        self.forks = int(forks)
        self.pattern = pattern
        self.module_args = module_args
        self.module_vars = module_vars
        self.timeout = timeout
        self.debug = debug
        self.remote_user = remote_user
        self.remote_pass = remote_pass
        self.remote_port = remote_port
        self.background = background
        self.basedir = basedir
        self.sudo = sudo
        self.sudo_pass = sudo_pass
        self.is_playbook = is_playbook

        # the 'local' transport cannot switch users, so the requested
        # remote_user must match the effective local user
        euid = pwd.getpwuid(os.geteuid())[0]
        if self.transport == 'local' and self.remote_user != euid:
            raise Exception("User mismatch: expected %s, but is %s" % (self.remote_user, euid))
        if type(self.module_args) not in [str, unicode, dict]:
            raise Exception("module_args must be a string or dict: %s" % self.module_args)

        # per-connection remote temp directory cache
        self._tmp_paths = {}
        random.seed()
|
2012-03-22 04:39:09 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-30 22:17:16 +02:00
|
|
|
    def _return_from_module(self, conn, host, result, err, executed=None):
        ''' helper function to handle JSON parsing of results

        Returns a 4-item list: [host, parsed_ok, data, err].  On a parse
        failure the third element is a diagnostic string rather than a dict.
        '''

        try:
            # module output must be JSON; anything else is treated as failure
            result = utils.parse_json(result)
            if executed is not None:
                # record the command line that produced this result
                result['invocation'] = executed
            if 'stderr' in result:
                # fold module-reported stderr into the accumulated err stream
                err="%s%s"%(err,result['stderr'])
            return [host, True, result, err]
        except Exception, e:
            return [host, False, "%s/%s/%s" % (str(e), result, executed), err]
|
2012-02-25 23:16:23 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-02-25 23:16:23 +01:00
|
|
|
def _delete_remote_files(self, conn, files):
|
2012-02-27 02:29:27 +01:00
|
|
|
''' deletes one or more remote files '''
|
2012-03-14 01:59:05 +01:00
|
|
|
|
2012-03-03 18:25:56 +01:00
|
|
|
if type(files) == str:
|
|
|
|
files = [ files ]
|
2012-02-25 23:16:23 +01:00
|
|
|
for filename in files:
|
2012-04-21 17:38:39 +02:00
|
|
|
if filename.find('/tmp/') == -1:
|
2012-03-03 18:25:56 +01:00
|
|
|
raise Exception("not going to happen")
|
2012-03-30 04:58:10 +02:00
|
|
|
self._exec_command(conn, "rm -rf %s" % filename, None)
|
2012-02-25 23:16:23 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-03 18:25:56 +01:00
|
|
|
def _transfer_module(self, conn, tmp, module):
|
2012-03-22 04:39:09 +01:00
|
|
|
''' transfers a module file to the remote side to execute it, but does not execute it yet '''
|
2012-03-14 01:59:05 +01:00
|
|
|
|
2012-03-03 18:25:56 +01:00
|
|
|
outpath = self._copy_module(conn, tmp, module)
|
2012-03-30 04:58:10 +02:00
|
|
|
self._exec_command(conn, "chmod +x %s" % outpath, tmp)
|
2012-02-25 23:16:23 +01:00
|
|
|
return outpath
|
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-04-19 04:19:25 +02:00
|
|
|
def _transfer_str(self, conn, tmp, name, data):
|
|
|
|
''' transfer string to remote file '''
|
2012-03-21 00:55:04 +01:00
|
|
|
|
2012-04-19 04:19:25 +02:00
|
|
|
if type(data) == dict:
|
|
|
|
data = utils.smjson(data)
|
2012-03-31 04:28:10 +02:00
|
|
|
|
2012-04-19 04:19:25 +02:00
|
|
|
afd, afile = tempfile.mkstemp()
|
|
|
|
afo = os.fdopen(afd, 'w')
|
2012-05-09 01:03:51 +02:00
|
|
|
afo.write(data.encode("utf8"))
|
2012-04-19 04:19:25 +02:00
|
|
|
afo.flush()
|
|
|
|
afo.close()
|
2012-03-31 04:28:10 +02:00
|
|
|
|
2012-04-19 04:19:25 +02:00
|
|
|
remote = os.path.join(tmp, name)
|
|
|
|
conn.put_file(afile, remote)
|
|
|
|
os.unlink(afile)
|
|
|
|
return remote
|
2012-03-06 04:23:56 +01:00
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
# *****************************************************
|
2012-03-21 03:29:21 +01:00
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
def _add_setup_vars(self, inject, args):
|
|
|
|
''' setup module variables need special handling '''
|
|
|
|
|
2012-04-11 02:58:40 +02:00
|
|
|
is_dict = False
|
|
|
|
if type(args) == dict:
|
|
|
|
is_dict = True
|
|
|
|
|
|
|
|
# TODO: keep this as a dict through the whole path to simplify this code
|
2012-03-22 04:39:09 +01:00
|
|
|
for (k,v) in inject.iteritems():
|
2012-04-19 04:23:33 +02:00
|
|
|
if not k.startswith('facter_') and not k.startswith('ohai_') and not k.startswith('ansible_'):
|
2012-04-11 02:58:40 +02:00
|
|
|
if not is_dict:
|
|
|
|
if str(v).find(" ") != -1:
|
|
|
|
v = "\"%s\"" % v
|
|
|
|
args += " %s=%s" % (k, str(v).replace(" ","~~~"))
|
|
|
|
else:
|
|
|
|
args[k]=v
|
2012-03-22 04:47:58 +01:00
|
|
|
return args
|
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
# *****************************************************
|
2012-03-31 01:06:14 +02:00
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
def _add_setup_metadata(self, args):
|
|
|
|
''' automatically determine where to store variables for the setup module '''
|
2012-04-11 02:58:40 +02:00
|
|
|
|
|
|
|
is_dict = False
|
|
|
|
if type(args) == dict:
|
|
|
|
is_dict = True
|
|
|
|
|
|
|
|
# TODO: keep this as a dict through the whole path to simplify this code
|
|
|
|
if not is_dict:
|
|
|
|
if args.find("metadata=") == -1:
|
|
|
|
if self.remote_user == 'root':
|
|
|
|
args = "%s metadata=/etc/ansible/setup" % args
|
|
|
|
else:
|
2012-04-24 22:45:54 +02:00
|
|
|
args = "%s metadata=$HOME/.ansible/setup" % args
|
2012-04-11 02:58:40 +02:00
|
|
|
else:
|
|
|
|
if not 'metadata' in args:
|
|
|
|
if self.remote_user == 'root':
|
|
|
|
args['metadata'] = '/etc/ansible/setup'
|
|
|
|
else:
|
2012-04-24 22:45:54 +02:00
|
|
|
args['metadata'] = "$HOME/.ansible/setup"
|
2012-03-22 04:47:58 +01:00
|
|
|
return args
|
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
|
|
|
    def _execute_module(self, conn, tmp, remote_module_path, args,
        async_jid=None, async_module=None, async_limit=None):
        ''' runs a module that has already been transferred

        Returns a (stdout, stderr, executed_command_string) tuple, or a
        skipped-result triple if the runner conditional evaluates false.
        '''

        # variables previously discovered by the setup module for this host
        inject = self.setup_cache.get(conn.host,{})
        conditional = utils.double_template(self.conditional, inject, self.setup_cache)
        # NOTE(review): eval of a templated string -- 'conditional' must come
        # from trusted playbook/CLI input, never from remote data
        if not eval(conditional):
            return [ utils.smjson(dict(skipped=True)), None, 'skipped' ]

        # inventory variables override/extend the setup-cache variables
        host_variables = self.inventory.get_variables(conn.host)
        inject.update(host_variables)

        if self.module_name == 'setup':
            args = self._add_setup_vars(inject, args)
            args = self._add_setup_metadata(args)

        # normalize args to a (templated) string form
        if type(args) == dict:
            args = utils.bigjson(args)
        args = utils.template(args, inject, self.setup_cache)

        module_name_tail = remote_module_path.split("/")[-1]

        # arguments are passed to the module via a file to avoid shell
        # quoting issues on the remote side
        argsfile = self._transfer_str(conn, tmp, 'arguments', args)
        if async_jid is None:
            cmd = "%s %s" % (remote_module_path, argsfile)
        else:
            # async runs go through the async wrapper with jid/limit/module
            cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])

        res, err = self._exec_command(conn, cmd, tmp, sudoable=True)
        client_executed_str = "%s %s" % (module_name_tail, args.strip())
        return ( res, err, client_executed_str )
|
2012-03-22 04:39:09 +01:00
|
|
|
|
|
|
|
# *****************************************************
|
|
|
|
|
2012-04-14 17:58:08 +02:00
|
|
|
def _save_setup_result_to_disk(self, conn, result):
|
|
|
|
''' cache results of calling setup '''
|
|
|
|
|
2012-04-14 22:08:04 +02:00
|
|
|
dest = os.path.expanduser("~/.ansible_setup_data")
|
|
|
|
user = getpass.getuser()
|
|
|
|
if user == 'root':
|
2012-04-14 17:58:08 +02:00
|
|
|
dest = "/var/lib/ansible/setup_data"
|
|
|
|
if not os.path.exists(dest):
|
|
|
|
os.makedirs(dest)
|
|
|
|
|
|
|
|
fh = open(os.path.join(dest, conn.host), "w")
|
|
|
|
fh.write(result)
|
|
|
|
fh.close()
|
|
|
|
|
|
|
|
return result
|
|
|
|
|
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
def _add_result_to_setup_cache(self, conn, result):
|
|
|
|
''' allows discovered variables to be used in templates and action statements '''
|
|
|
|
|
|
|
|
host = conn.host
|
2012-04-23 20:06:14 +02:00
|
|
|
if 'ansible_facts' in result:
|
|
|
|
var_result = result['ansible_facts']
|
|
|
|
else:
|
2012-03-22 04:39:09 +01:00
|
|
|
var_result = {}
|
|
|
|
|
|
|
|
# note: do not allow variables from playbook to be stomped on
|
|
|
|
# by variables coming up from facter/ohai/etc. They
|
|
|
|
# should be prefixed anyway
|
|
|
|
if not host in self.setup_cache:
|
|
|
|
self.setup_cache[host] = {}
|
|
|
|
for (k, v) in var_result.iteritems():
|
|
|
|
if not k in self.setup_cache[host]:
|
|
|
|
self.setup_cache[host][k] = v
|
2012-02-25 23:16:23 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-20 03:42:31 +01:00
|
|
|
def _execute_normal_module(self, conn, host, tmp, module_name):
|
2012-03-22 04:39:09 +01:00
|
|
|
''' transfer & execute a module that is not 'copy' or 'template' '''
|
2012-03-06 04:23:56 +01:00
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
# shell and command are the same module
|
2012-03-20 03:42:31 +01:00
|
|
|
if module_name == 'shell':
|
|
|
|
module_name = 'command'
|
2012-03-31 04:47:58 +02:00
|
|
|
self.module_args += " #USE_SHELL"
|
2012-03-15 01:40:06 +01:00
|
|
|
|
2012-03-20 03:42:31 +01:00
|
|
|
module = self._transfer_module(conn, tmp, module_name)
|
2012-03-30 22:17:16 +02:00
|
|
|
(result, err, executed) = self._execute_module(conn, tmp, module, self.module_args)
|
2012-03-06 04:23:56 +01:00
|
|
|
|
2012-04-23 20:06:14 +02:00
|
|
|
(host, ok, data, err) = self._return_from_module(conn, host, result, err, executed)
|
|
|
|
|
|
|
|
if ok:
|
|
|
|
self._add_result_to_setup_cache(conn, data)
|
2012-03-18 22:24:09 +01:00
|
|
|
|
2012-04-23 20:06:14 +02:00
|
|
|
return (host, ok, data, err)
|
2012-03-03 18:25:56 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-20 03:42:31 +01:00
|
|
|
def _execute_async_module(self, conn, host, tmp, module_name):
|
2012-03-22 04:39:09 +01:00
|
|
|
''' transfer the given module name, plus the async module, then run it '''
|
2012-03-14 01:59:05 +01:00
|
|
|
|
2012-03-19 00:25:56 +01:00
|
|
|
# hack to make the 'shell' module keyword really be executed
|
|
|
|
# by the command module
|
2012-03-20 03:42:31 +01:00
|
|
|
module_args = self.module_args
|
|
|
|
if module_name == 'shell':
|
|
|
|
module_name = 'command'
|
2012-03-31 04:47:58 +02:00
|
|
|
module_args += " #USE_SHELL"
|
2012-03-19 00:25:56 +01:00
|
|
|
|
2012-03-03 18:25:56 +01:00
|
|
|
async = self._transfer_module(conn, tmp, 'async_wrapper')
|
2012-03-20 03:42:31 +01:00
|
|
|
module = self._transfer_module(conn, tmp, module_name)
|
2012-03-30 22:17:16 +02:00
|
|
|
(result, err, executed) = self._execute_module(conn, tmp, async, module_args,
|
2012-03-15 00:57:56 +01:00
|
|
|
async_module=module,
|
|
|
|
async_jid=self.generated_jid,
|
|
|
|
async_limit=self.background
|
|
|
|
)
|
2012-03-21 03:29:21 +01:00
|
|
|
|
2012-03-30 22:17:16 +02:00
|
|
|
return self._return_from_module(conn, host, result, err, executed)
|
2012-02-25 23:16:23 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-03 04:38:55 +01:00
|
|
|
    def _execute_copy(self, conn, host, tmp):
        ''' handler for file transfer operations '''

        # load up options
        options = utils.parse_kv(self.module_args)
        source = options.get('src', None)
        dest = options.get('dest', None)
        if (source is None and not 'first_available_file' in self.module_vars) or dest is None:
            return (host, True, dict(failed=True, msg="src and dest are required"), '')

        # apply templating to source argument
        inject = self.setup_cache.get(conn.host,{})

        # FIXME: break duplicate code up into subfunction
        # if we have first_available_file in our vars
        # look up the files and use the first one we find as src
        if 'first_available_file' in self.module_vars:
            found = False
            for fn in self.module_vars.get('first_available_file'):
                fn = utils.template(fn, inject, self.setup_cache)
                if os.path.exists(fn):
                    source = fn
                    found = True
                    break
            if not found:
                return (host, True, dict(failed=True, msg="could not find src in first_available_file list"), '')

        source = utils.template(source, inject, self.setup_cache)

        # transfer the file to a remote tmp location
        tmp_src = tmp + source.split('/')[-1]
        conn.put_file(utils.path_dwim(self.basedir, source), tmp_src)

        # install the copy module
        self.module_name = 'copy'
        module = self._transfer_module(conn, tmp, 'copy')

        # run the copy module against the staged tmp file
        args = "src=%s dest=%s" % (tmp_src, dest)
        (result1, err, executed) = self._execute_module(conn, tmp, module, args)
        (host, ok, data, err) = self._return_from_module(conn, host, result1, err, executed)

        if ok:
            # apply remaining k=v options (mode/owner/...) via the file module
            return self._chain_file_module(conn, tmp, data, err, options, executed)
        else:
            return (host, ok, data, err)
|
2012-02-25 23:16:23 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-04-11 05:19:23 +02:00
|
|
|
def _execute_fetch(self, conn, host, tmp):
|
|
|
|
''' handler for fetch operations '''
|
|
|
|
|
2012-04-12 02:12:01 +02:00
|
|
|
# load up options
|
2012-04-11 05:19:23 +02:00
|
|
|
options = utils.parse_kv(self.module_args)
|
2012-04-12 02:57:41 +02:00
|
|
|
source = options.get('src', None)
|
|
|
|
dest = options.get('dest', None)
|
|
|
|
if source is None or dest is None:
|
|
|
|
return (host, True, dict(failed=True, msg="src and dest are required"), '')
|
2012-04-11 18:14:10 +02:00
|
|
|
|
2012-05-08 16:11:32 +02:00
|
|
|
# apply templating to source argument
|
|
|
|
inject = self.setup_cache.get(conn.host,{})
|
|
|
|
source = utils.template(source, inject, self.setup_cache)
|
|
|
|
|
2012-04-12 02:12:01 +02:00
|
|
|
# files are saved in dest dir, with a subdir for each host, then the filename
|
2012-04-19 03:12:48 +02:00
|
|
|
dest = "%s/%s/%s" % (utils.path_dwim(self.basedir, dest), host, source)
|
|
|
|
dest = dest.replace("//","/")
|
2012-04-11 05:19:23 +02:00
|
|
|
|
2012-05-08 16:11:32 +02:00
|
|
|
# apply templating to dest argument
|
|
|
|
dest = utils.template(dest, inject, self.setup_cache)
|
|
|
|
|
2012-04-12 02:12:01 +02:00
|
|
|
# compare old and new md5 for support of change hooks
|
2012-04-11 05:19:23 +02:00
|
|
|
local_md5 = None
|
|
|
|
if os.path.exists(dest):
|
|
|
|
local_md5 = os.popen("md5sum %s" % dest).read().split()[0]
|
|
|
|
remote_md5 = self._exec_command(conn, "md5sum %s" % source, tmp, True)[0].split()[0]
|
|
|
|
|
|
|
|
if remote_md5 != local_md5:
|
2012-04-12 02:12:01 +02:00
|
|
|
# create the containing directories, if needed
|
2012-04-11 18:14:10 +02:00
|
|
|
os.makedirs(os.path.dirname(dest))
|
2012-04-12 02:12:01 +02:00
|
|
|
# fetch the file and check for changes
|
2012-04-11 05:19:23 +02:00
|
|
|
conn.fetch_file(source, dest)
|
|
|
|
new_md5 = os.popen("md5sum %s" % dest).read().split()[0]
|
|
|
|
if new_md5 != remote_md5:
|
2012-04-12 02:12:01 +02:00
|
|
|
return (host, True, dict(failed=True, msg="md5 mismatch", md5sum=new_md5), '')
|
|
|
|
return (host, True, dict(changed=True, md5sum=new_md5), '')
|
2012-04-11 05:19:23 +02:00
|
|
|
else:
|
2012-04-12 02:12:01 +02:00
|
|
|
return (host, True, dict(changed=False, md5sum=local_md5), '')
|
2012-04-11 05:19:23 +02:00
|
|
|
|
|
|
|
|
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-30 22:17:16 +02:00
|
|
|
    def _chain_file_module(self, conn, tmp, data, err, options, executed):
        ''' handles changing file attribs after copy/template operations '''

        old_changed = data.get('changed', False)
        module = self._transfer_module(conn, tmp, 'file')
        # re-run the original k=v options through the file module so
        # attribute-style settings get applied to the new file
        args = ' '.join([ "%s=%s" % (k,v) for (k,v) in options.items() ])
        (result2, err2, executed2) = self._execute_module(conn, tmp, module, args)
        # note: the original 'executed' string is kept in the result, not
        # the file module's own invocation
        results2 = self._return_from_module(conn, conn.host, result2, err2, executed)
        (host, ok, data2, err2) = results2
        if ok:
            new_changed = data2.get('changed', False)
            data.update(data2)
        else:
            new_changed = False
        # report changed if either the copy/template or the attrib change did anything
        if old_changed or new_changed:
            data['changed'] = True
        return (host, ok, data, "%s%s"%(err,err2))
|
2012-03-22 04:39:09 +01:00
|
|
|
|
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-03 04:38:55 +01:00
|
|
|
    def _execute_template(self, conn, host, tmp):
        ''' handler for template operations '''

        # load up options
        options = utils.parse_kv(self.module_args)
        source = options.get('src', None)
        dest = options.get('dest', None)
        metadata = options.get('metadata', None)
        if (source is None and 'first_available_file' not in self.module_vars) or dest is None:
            return (host, True, dict(failed=True, msg="src and dest are required"), '')

        # apply templating to source argument so vars can be used in the path
        inject = self.setup_cache.get(conn.host,{})

        # if we have first_available_file in our vars
        # look up the files and use the first one we find as src
        if 'first_available_file' in self.module_vars:
            found = False
            for fn in self.module_vars.get('first_available_file'):
                fn = utils.template(fn, inject, self.setup_cache)
                if os.path.exists(fn):
                    source = fn
                    found = True
                    break
            if not found:
                return (host, True, dict(failed=True, msg="could not find src in first_available_file list"), '')

        source = utils.template(source, inject, self.setup_cache)

        (host, ok, data, err) = (None, None, None, None)

        if not self.is_playbook:

            # not running from a playbook so we have to fetch the remote
            # setup file contents before proceeding...
            if metadata is None:
                if self.remote_user == 'root':
                    metadata = '/etc/ansible/setup'
                else:
                    # path is expanded on remote side
                    metadata = "~/.ansible/setup"

            # install the slurp module
            slurp_module = self._transfer_module(conn, tmp, 'slurp')

            # run the slurp module to get the metadata file
            args = "src=%s" % metadata
            (result1, err, executed) = self._execute_module(conn, tmp, slurp_module, args)
            result1 = utils.json_loads(result1)
            if not 'content' in result1 or result1.get('encoding','base64') != 'base64':
                result1['failed'] = True
                return self._return_from_module(conn, host, result1, err, executed)
            # slurp returns base64-encoded file content
            content = base64.b64decode(result1['content'])
            inject = utils.json_loads(content)

        # install the copy module, which will ship the rendered result
        copy_module = self._transfer_module(conn, tmp, 'copy')

        # template the source data locally
        source_data = codecs.open(utils.path_dwim(self.basedir, source), encoding="utf8").read()
        resultant = ''
        try:
            resultant = utils.template(source_data, inject, self.setup_cache, no_engine=False)
        except Exception, e:
            return (host, False, dict(failed=True, msg=str(e)), '')
        xfered = self._transfer_str(conn, tmp, 'source', resultant)

        # run the COPY module
        args = "src=%s dest=%s" % (xfered, dest)
        (result1, err, executed) = self._execute_module(conn, tmp, copy_module, args)
        (host, ok, data, err) = self._return_from_module(conn, host, result1, err, executed)

        # modify file attribs if needed
        if ok:
            # report 'template' rather than 'copy' in the invocation record
            executed = executed.replace("copy","template",1)
            return self._chain_file_module(conn, tmp, data, err, options, executed)
        else:
            return (host, ok, data, err)
|
2012-02-25 23:16:23 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
2012-02-25 23:16:23 +01:00
|
|
|
|
|
|
|
def _executor(self, host):
|
2012-03-26 01:05:27 +02:00
|
|
|
try:
|
2012-03-30 22:17:16 +02:00
|
|
|
(host, ok, data, err) = self._executor_internal(host)
|
2012-03-27 03:17:11 +02:00
|
|
|
if not ok:
|
2012-03-27 03:25:43 +02:00
|
|
|
self.callbacks.on_unreachable(host, data)
|
2012-03-27 03:17:11 +02:00
|
|
|
return (host, ok, data)
|
2012-03-26 01:05:27 +02:00
|
|
|
except errors.AnsibleError, ae:
|
|
|
|
msg = str(ae)
|
|
|
|
self.callbacks.on_unreachable(host, msg)
|
|
|
|
return [host, False, msg]
|
|
|
|
except Exception:
|
|
|
|
msg = traceback.format_exc()
|
|
|
|
self.callbacks.on_unreachable(host, msg)
|
|
|
|
return [host, False, msg]
|
|
|
|
|
|
|
|
def _executor_internal(self, host):
    ''' callback executed in parallel for each host. returns (hostname, connected_ok, extra) '''

    # per-host inventory variables may override the SSH port
    host_variables = self.inventory.get_variables(host)
    port = host_variables.get('ansible_ssh_port', self.remote_port)

    conn = None
    try:
        conn = self.connector.connect(host, port)
    except errors.AnsibleConnectionFailed, e:
        # could not reach the host at all; reported as "dark" by the caller
        return [ host, False, "FAILED: %s" % str(e), None ]

    # the module name itself may be templated with facts from the setup cache
    cache = self.setup_cache.get(host, {})
    module_name = utils.template(self.module_name, cache, self.setup_cache)

    # per-invocation scratch directory on the remote host
    tmp = self._get_tmp_path(conn)
    result = None

    # copy/fetch/template get dedicated handlers; everything else is a
    # generic module run (async when a background time limit is set)
    if self.module_name == 'copy':
        result = self._execute_copy(conn, host, tmp)
    elif self.module_name == 'fetch':
        result = self._execute_fetch(conn, host, tmp)
    elif self.module_name == 'template':
        result = self._execute_template(conn, host, tmp)
    else:
        if self.background == 0:
            result = self._execute_normal_module(conn, host, tmp, module_name)
        else:
            result = self._execute_async_module(conn, host, tmp, module_name)

    # remove the remote scratch directory before reporting results
    self._delete_remote_files(conn, tmp)
    conn.close()

    (host, connect_ok, data, err) = result
    if not connect_ok:
        self.callbacks.on_unreachable(host, data)
    else:
        # 'and' binds tighter than 'or': failure means either an explicit
        # 'failed' key, or an 'rc' key with a nonzero return code
        if 'failed' in data or 'rc' in data and str(data['rc']) != '0':
            self.callbacks.on_failed(host, data)
        elif 'skipped' in data:
            self.callbacks.on_skipped(host)
        else:
            self.callbacks.on_ok(host, data)

    if err:
        # surface raw stderr when debugging, or when module output failed to parse
        if self.debug or data.get('parsed', True) == False:
            self.callbacks.on_error(host, err)

    return result
|
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-30 04:58:10 +02:00
|
|
|
def _exec_command(self, conn, cmd, tmp, sudoable=False):
|
2012-02-27 02:29:27 +01:00
|
|
|
''' execute a command string over SSH, return the output '''
|
2012-05-04 02:11:21 +02:00
|
|
|
sudo_user = self.sudo_user
|
2012-05-07 00:24:04 +02:00
|
|
|
stdin, stdout, stderr = conn.exec_command(cmd, tmp, sudo_user, sudoable=sudoable)
|
2012-04-27 07:25:38 +02:00
|
|
|
err=None
|
|
|
|
out=None
|
2012-03-30 22:17:16 +02:00
|
|
|
if type(stderr) != str:
|
|
|
|
err="\n".join(stderr.readlines())
|
|
|
|
else:
|
|
|
|
err=stderr
|
2012-03-29 04:51:16 +02:00
|
|
|
if type(stdout) != str:
|
2012-04-27 07:25:38 +02:00
|
|
|
out="\n".join(stdout.readlines())
|
2012-03-29 04:51:16 +02:00
|
|
|
else:
|
2012-04-27 07:25:38 +02:00
|
|
|
out=stdout
|
|
|
|
return (out,err)
|
|
|
|
|
2012-02-25 23:16:23 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-02-27 23:52:37 +01:00
|
|
|
def _get_tmp_path(self, conn):
|
2012-02-27 02:29:27 +01:00
|
|
|
''' gets a temporary path on a remote box '''
|
2012-03-14 01:59:05 +01:00
|
|
|
|
2012-04-24 22:45:54 +02:00
|
|
|
# The problem with this is that it's executed on the
|
|
|
|
# overlord, not on the target so we can't use tempdir and os.path
|
|
|
|
# Only support the *nix world for now by using the $HOME env var
|
|
|
|
|
2012-04-21 17:38:39 +02:00
|
|
|
basetmp = "/var/tmp"
|
|
|
|
if self.remote_user != 'root':
|
2012-04-24 22:45:54 +02:00
|
|
|
basetmp = "$HOME/.ansible/tmp"
|
|
|
|
cmd = "mktemp -d %s/ansible.XXXXXX" % basetmp
|
2012-04-21 17:38:39 +02:00
|
|
|
if self.remote_user != 'root':
|
2012-04-21 18:06:54 +02:00
|
|
|
cmd = "mkdir -p %s && %s" % (basetmp, cmd)
|
2012-04-21 17:38:39 +02:00
|
|
|
|
|
|
|
result, err = self._exec_command(conn, cmd, None, sudoable=False)
|
2012-03-29 04:51:16 +02:00
|
|
|
cleaned = result.split("\n")[0].strip() + '/'
|
|
|
|
return cleaned
|
|
|
|
|
2012-02-27 23:52:37 +01:00
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-03 18:25:56 +01:00
|
|
|
def _copy_module(self, conn, tmp, module):
|
2012-02-27 02:29:27 +01:00
|
|
|
''' transfer a module over SFTP, does not run it '''
|
2012-03-14 01:59:05 +01:00
|
|
|
|
2012-03-13 01:53:10 +01:00
|
|
|
if module.startswith("/"):
|
2012-03-18 22:16:12 +01:00
|
|
|
raise errors.AnsibleFileNotFound("%s is not a module" % module)
|
2012-03-14 01:59:05 +01:00
|
|
|
in_path = os.path.expanduser(os.path.join(self.module_path, module))
|
2012-03-13 01:53:10 +01:00
|
|
|
if not os.path.exists(in_path):
|
2012-03-18 22:16:12 +01:00
|
|
|
raise errors.AnsibleFileNotFound("module not found: %s" % in_path)
|
2012-03-13 01:53:10 +01:00
|
|
|
|
2012-03-03 18:25:56 +01:00
|
|
|
out_path = tmp + module
|
2012-03-10 19:35:46 +01:00
|
|
|
conn.put_file(in_path, out_path)
|
2012-02-25 23:16:23 +01:00
|
|
|
return out_path
|
|
|
|
|
2012-03-14 01:59:05 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
def _parallel_exec(self, hosts):
    ''' handles multiprocessing when more than 1 fork is required '''

    # managed queues so worker processes can share jobs and return results
    manager = multiprocessing.Manager()
    job_queue = manager.Queue()
    result_queue = manager.Queue()
    for entry in hosts:
        job_queue.put(entry)

    workers = [
        multiprocessing.Process(target=_executor_hook, args=(job_queue, result_queue))
        for _ in range(self.forks)
    ]
    for prc in workers:
        prc.start()

    try:
        for prc in workers:
            prc.join()
    except KeyboardInterrupt:
        # ctrl-c: tear the pool down instead of leaving orphaned workers
        for prc in workers:
            prc.terminate()
            prc.join()

    # drain whatever the workers managed to produce
    results = []
    while not result_queue.empty():
        results.append(result_queue.get(block=False))
    return results
|
|
|
|
|
|
|
|
# *****************************************************
|
|
|
|
|
|
|
|
def _partition_results(self, results):
|
|
|
|
''' seperate results by ones we contacted & ones we didn't '''
|
|
|
|
|
|
|
|
results2 = dict(contacted={}, dark={})
|
|
|
|
|
2012-03-26 01:05:27 +02:00
|
|
|
if results is None:
|
|
|
|
return None
|
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
for result in results:
|
|
|
|
(host, contacted_ok, result) = result
|
|
|
|
if contacted_ok:
|
2012-02-25 23:16:23 +01:00
|
|
|
results2["contacted"][host] = result
|
2012-03-22 04:39:09 +01:00
|
|
|
else:
|
|
|
|
results2["dark"][host] = result
|
|
|
|
|
|
|
|
# hosts which were contacted but never got a chance to return
|
2012-04-13 14:39:54 +02:00
|
|
|
for host in self.inventory.list_hosts(self.pattern):
|
2012-03-22 04:39:09 +01:00
|
|
|
if not (host in results2['dark'] or host in results2['contacted']):
|
2012-02-27 06:43:02 +01:00
|
|
|
results2["dark"][host] = {}
|
2012-03-22 04:39:09 +01:00
|
|
|
|
2012-02-25 23:16:23 +01:00
|
|
|
return results2
|
2012-02-24 05:28:58 +01:00
|
|
|
|
2012-03-22 04:39:09 +01:00
|
|
|
# *****************************************************
|
|
|
|
|
|
|
|
def run(self):
    ''' xfer & run module on all matched hosts '''

    # find hosts that match the pattern
    matched = self.inventory.list_hosts(self.pattern)
    if not matched:
        self.callbacks.on_no_hosts()
        return dict(contacted={}, dark={})

    # pair each hostname with this runner so worker processes can reach it
    host_tuples = [(self, name) for name in matched]
    if self.forks > 1:
        results = self._parallel_exec(host_tuples)
    else:
        # single fork: run serially in this process
        results = [self._executor(pair[1]) for pair in host_tuples]
    return self._partition_results(results)
|
|
|
|
|
2012-04-26 20:34:49 +02:00
|
|
|
def runAsync(self, time_limit):
    ''' Run this module asynchronously and return a poller. '''
    # a nonzero background value switches execution to the async module path
    self.background = time_limit
    kickoff = self.run()
    return kickoff, AsyncPoller(kickoff, self)
|
|
|
|
|
|
|
|
class AsyncPoller(object):
    """ Manage asynchronous jobs. """

    def __init__(self, results, runner):
        """
        results -- dict returned by Runner.run() for the async kickoff pass
        runner  -- the Runner that launched the job; reused for status polls

        Raises errors.AnsibleError when no job id can be determined or no
        host actually started the job.
        """
        self.runner = runner

        self.results = { 'contacted': {}, 'dark': {}}
        self.hosts_to_poll = []
        self.completed = False

        # Get job id and which hosts to poll again in the future
        jid = None
        for (host, res) in results['contacted'].items():
            if res.get('started', False):
                self.hosts_to_poll.append(host)
                jid = res.get('ansible_job_id', None)
            else:
                # host finished (or failed to start) during kickoff; final result
                self.results['contacted'][host] = res
        for (host, res) in results['dark'].items():
            self.results['dark'][host] = res

        if jid is None:
            raise errors.AnsibleError("unexpected error: unable to determine jid")
        if len(self.hosts_to_poll)==0:
            # BUGFIX: was errors.AnsibleErrot (typo), which raised
            # AttributeError instead of the intended AnsibleError
            raise errors.AnsibleError("unexpected error: no hosts to poll")
        self.jid = jid

    def poll(self):
        """ Poll the job status.

        Returns the changes in this iteration."""
        # repurpose the runner to ask async_status about our jid, restricted
        # to the hosts that were still running at the last poll
        self.runner.module_name = 'async_status'
        self.runner.module_args = "jid=%s" % self.jid
        self.runner.pattern = "*"
        self.runner.background = 0

        self.runner.inventory.restrict_to(self.hosts_to_poll)
        results = self.runner.run()
        self.runner.inventory.lift_restriction()

        hosts = []
        poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
        for (host, res) in results['contacted'].items():
            if res.get('started',False):
                # still running; poll this host again next round
                hosts.append(host)
                poll_results['polled'][host] = res
            else:
                # job finished on this host; record the final result
                self.results['contacted'][host] = res
                poll_results['contacted'][host] = res
                if 'failed' in res:
                    self.runner.callbacks.on_async_failed(host, res, self.jid)
                else:
                    self.runner.callbacks.on_async_ok(host, res, self.jid)
        for (host, res) in results['dark'].items():
            # host became unreachable while the job was running
            self.results['dark'][host] = res
            poll_results['dark'][host] = res
            self.runner.callbacks.on_async_failed(host, res, self.jid)

        self.hosts_to_poll = hosts
        if len(hosts)==0:
            self.completed = True

        return poll_results

    def wait(self, seconds, poll_interval):
        """ Wait a certain time for job completion, check status every poll_interval. """
        clock = seconds - poll_interval
        while (clock >= 0 and not self.completed):
            time.sleep(poll_interval)

            poll_results = self.poll()

            # report progress for hosts that are still running
            for (host, res) in poll_results['polled'].items():
                if res.get('started'):
                    self.runner.callbacks.on_async_poll(host, res, self.jid, clock)

            clock = clock - poll_interval

        return self.results
|