From c05970df2cde542bbbdfa48575e1063f8cfcf250 Mon Sep 17 00:00:00 2001 From: Codey Oxley Date: Tue, 15 Sep 2015 00:53:34 -0700 Subject: [PATCH 001/590] Added NSoT Inventory script to pull from Device resources --- contrib/inventory/nsot.py | 345 ++++++++++++++++++++++++++++++++++++ contrib/inventory/nsot.yaml | 22 +++ 2 files changed, 367 insertions(+) create mode 100644 contrib/inventory/nsot.py create mode 100644 contrib/inventory/nsot.yaml diff --git a/contrib/inventory/nsot.py b/contrib/inventory/nsot.py new file mode 100644 index 0000000000..b72cac779c --- /dev/null +++ b/contrib/inventory/nsot.py @@ -0,0 +1,345 @@ +#!/bin/env python2.7 + +''' +nsot +==== + +Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox + +Features +-------- + +* Define host groups in form of NSoT device attribute criteria + +* All parameters defined by the spec as of 2015-09-05 are supported. + + + ``--list``: Returns JSON hash of host groups -> hosts and top-level + ``_meta`` -> ``hostvars`` which correspond to all device attributes. + + Group vars can be specified in the YAML configuration, noted below. + + + ``--host ``: Returns JSON hash where every item is a device + attribute. + +* In addition to all attributes assigned to resource being returned, script + will also append ``site_id`` and ``id`` as facts to utilize. + + +Confguration +------------ + +Since it'd be annoying and failure prone to guess where you're configuration +file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it. + +This file should adhere to the YAML spec. All top-level variable must be +desired Ansible group-name hashed with single 'query' item to define the NSoT +attribute query. + +Queries follow the normal NSoT query syntax, `shown here`_ + +.. _shown here: https://github.com/dropbox/pynsot#set-queries + +.. 
code:: yaml + + routers: + query: 'deviceType=ROUTER' + vars: + a: b + c: d + + juniper_fw: + query: 'deviceType=FIREWALL manufacturer=JUNIPER' + + not_f10: + query: '-manufacturer=FORCE10' + +The inventory will automatically use your ``.pynsotrc`` like normal pynsot from +cli would, so make sure that's configured appropriately. + +.. note:: + + Attributes I'm showing above are influenced from ones that the Trigger + project likes. As is the spirit of NSoT, use whichever attributes work best + for your workflow. + +If config file is blank or absent, the following default groups will be +created: + +* ``routers``: deviceType=ROUTER +* ``switches``: deviceType=SWITCH +* ``firewalls``: deviceType=FIREWALL + +These are likely not useful for everyone so please use the configuration. :) + +.. note:: + + By default, resources will only be returned for what your default + site is set for in your ``~/.pynsotrc``. + + If you want to specify, add an extra key under the group for ``site: n``. + +Output Examples +--------------- + +Here are some examples shown from just calling the command directly:: + + $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.' + { + "routers": { + "hosts": [ + "test1.example.com" + ], + "vars": { + "cool_level": "very", + "group": "routers" + } + }, + "firewalls": { + "hosts": [ + "test2.example.com" + ], + "vars": { + "cool_level": "enough", + "group": "firewalls" + } + }, + "_meta": { + "hostvars": { + "test2.example.com": { + "make": "SRX", + "site_id": 1, + "id": 108 + }, + "test1.example.com": { + "make": "MX80", + "site_id": 1, + "id": 107 + } + } + }, + "rtr_and_fw": { + "hosts": [ + "test1.example.com", + "test2.example.com" + ], + "vars": {} + } + } + + + $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.' 
+ { + "make": "MX80", + "site_id": 1, + "id": 107 + } + +''' + +from __future__ import print_function +import sys +import os +import pkg_resources +import argparse +import json +import yaml +from textwrap import dedent +from pynsot.client import get_api_client +from pynsot.app import HttpServerError +from click.exceptions import UsageError + +# Version source of truth is in setup.py +__version__ = pkg_resources.require('ansible_nsot')[0].version + + +def warning(*objs): + print("WARNING: ", *objs, file=sys.stderr) + + +class NSoTInventory(object): + '''NSoT Client object for gather inventory''' + + def __init__(self): + self.config = dict() + config_env = os.environ.get('NSOT_INVENTORY_CONFIG') + if config_env: + try: + config_file = os.path.abspath(config_env) + except IOError: # If file non-existent, use default config + self._config_default() + except Exception as e: + sys.exit('%s\n' % e) + + with open(config_file) as f: + try: + self.config.update(yaml.safe_load(f)) + except TypeError: # If empty file, use default config + warning('Empty config file') + self._config_default() + except Exception as e: + sys.exit('%s\n' % e) + else: # Use defaults if env var missing + self._config_default() + self.groups = self.config.keys() + self.client = get_api_client() + self._meta = {'hostvars': dict()} + + def _config_default(self): + default_yaml = ''' + --- + routers: + query: deviceType=ROUTER + switches: + query: deviceType=SWITCH + firewalls: + query: deviceType=FIREWALL + ''' + self.config = yaml.safe_load(dedent(default_yaml)) + + def do_list(self): + '''Direct callback for when ``--list`` is provided + + Relies on the configuration generated from init to run + _inventory_group() + ''' + inventory = dict() + for group, contents in self.config.iteritems(): + group_response = self._inventory_group(group, contents) + inventory.update(group_response) + inventory.update({'_meta': self._meta}) + return json.dumps(inventory) + + def do_host(self, host): + return 
json.dumps(self._hostvars(host)) + + def _hostvars(self, host): + '''Return dictionary of all device attributes + + Depending on number of devices in NSoT, could be rather slow since this + has to request every device resource to filter through + ''' + device = [i for i in self.client.devices.get()['data']['devices'] + if host in i['hostname']][0] + attributes = device['attributes'] + attributes.update({'site_id': device['site_id'], 'id': device['id']}) + return attributes + + def _inventory_group(self, group, contents): + '''Takes a group and returns inventory for it as dict + + :param group: Group name + :type group: str + :param contents: The contents of the group's YAML config + :type contents: dict + + contents param should look like:: + + { + 'query': 'xx', + 'vars': + 'a': 'b' + } + + Will return something like:: + + { group: { + hosts: [], + vars: {}, + } + ''' + query = contents.get('query') + hostvars = contents.get('vars', dict()) + site = contents.get('site', dict()) + obj = {group: dict()} + obj[group]['hosts'] = [] + obj[group]['vars'] = hostvars + try: + assert isinstance(query, basestring) + except: + sys.exit('ERR: Group queries must be a single string\n' + ' Group: %s\n' + ' Query: %s\n' % (group, query) + ) + try: + if site: + site = self.client.sites(site) + devices = site.devices.query.get(query=query) + else: + devices = self.client.devices.query.get(query=query) + except HttpServerError as e: + if '500' in str(e.response): + _site = 'Correct site id?' + _attr = 'Queried attributes actually exist?' + questions = _site + '\n' + _attr + sys.exit('ERR: 500 from server.\n%s' % questions) + else: + raise + except UsageError: + sys.exit('ERR: Could not connect to server. 
Running?') + + # Would do a list comprehension here, but would like to save code/time + # and also acquire attributes in this step + for host in devices['data']['devices']: + # Iterate through each device that matches query, assign hostname + # to the group's hosts array and then use this single iteration as + # a chance to update self._meta which will be used in the final + # return + hostname = host['hostname'] + obj[group]['hosts'].append(hostname) + attributes = host['attributes'] + attributes.update({'site_id': host['site_id'], 'id': host['id']}) + self._meta['hostvars'].update({hostname: attributes}) + + return obj + + +def parse_args(): + desc = __doc__.splitlines()[4] # Just to avoid being redundant + + # Establish parser with options and error out if no action provided + parser = argparse.ArgumentParser( + description=desc, + version=__version__, + conflict_handler='resolve', + ) + + # Arguments + # + # Currently accepting (--list | -l) and (--host | -h) + # These must not be allowed together + parser.add_argument( + '--list', '-l', + help='Print JSON object containing hosts to STDOUT', + action='store_true', + dest='list_', # Avoiding syntax highlighting for list + ) + + parser.add_argument( + '--host', '-h', + help='Print JSON object containing hostvars for ', + action='store', + ) + args = parser.parse_args() + + if not args.list_ and not args.host: # Require at least one option + parser.exit(status=1, message='No action requested') + + if args.list_ and args.host: # Do not allow multiple options + parser.exit(status=1, message='Too many actions requested') + + return args + + +def main(): + '''Set up argument handling and callback routing''' + args = parse_args() + client = NSoTInventory() + + # Callback condition + if args.list_: + print(client.do_list()) + elif args.host: + print(client.do_host(args.host)) + +if __name__ == '__main__': + main() diff --git a/contrib/inventory/nsot.yaml b/contrib/inventory/nsot.yaml new file mode 100644 index 
0000000000..ebddbc8234 --- /dev/null +++ b/contrib/inventory/nsot.yaml @@ -0,0 +1,22 @@ +--- +juniper_routers: + query: 'deviceType=ROUTER manufacturer=JUNIPER' + vars: + group: juniper_routers + netconf: true + os: junos + +cisco_asa: + query: 'manufacturer=CISCO deviceType=FIREWALL' + vars: + group: cisco_asa + routed_vpn: false + stateful: true + +old_cisco_asa: + query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+' + vars: + old_nat: true + +not_f10: + query: '-manufacturer=FORCE10' From 15915bb3caa16ed05051e4606691950249ec0165 Mon Sep 17 00:00:00 2001 From: Codey Oxley Date: Tue, 15 Sep 2015 08:53:38 -0700 Subject: [PATCH 002/590] NSoT Inventory: python2.7 -> python shebang --- contrib/inventory/nsot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/inventory/nsot.py b/contrib/inventory/nsot.py index b72cac779c..611e3cc0d0 100644 --- a/contrib/inventory/nsot.py +++ b/contrib/inventory/nsot.py @@ -1,4 +1,4 @@ -#!/bin/env python2.7 +#!/bin/env python ''' nsot From 113c4350e39dc37194b20389fe2933dd73381aa4 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Thu, 8 Oct 2015 17:37:24 -0700 Subject: [PATCH 003/590] Force SSL transport for pywinrm updates, get host+group vars I PR'd a change to pywinrm to allow server certs to be ignored; but it's only on the SSL transport (which we were previously ignoring). For this to work more generally, we're also now pulling the named ansible_winrm_* args from the merged set of host/group vars, not just host_vars. 
--- lib/ansible/plugins/connection/winrm.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index ee8616f5d8..5daa9f9d23 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -48,6 +48,7 @@ from ansible.plugins.connection import ConnectionBase from ansible.plugins import shell_loader from ansible.utils.path import makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode, to_str +from ansible.utils.vars import combine_vars class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' @@ -76,7 +77,7 @@ class Connection(ConnectionBase): ''' Override WinRM-specific options from host variables. ''' - host_vars = host.get_vars() + host_vars = combine_vars(host.get_group_vars(), host.get_vars()) self._winrm_host = self._play_context.remote_addr self._winrm_port = int(self._play_context.port or 5986) @@ -91,10 +92,12 @@ class Connection(ConnectionBase): self._winrm_realm = None self._winrm_realm = host_vars.get('ansible_winrm_realm', self._winrm_realm) or None + transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext' + if HAVE_KERBEROS and ('@' in self._winrm_user or self._winrm_realm): - self._winrm_transport = 'kerberos,plaintext' + self._winrm_transport = 'kerberos,%s' % transport_selector else: - self._winrm_transport = 'plaintext' + self._winrm_transport = transport_selector self._winrm_transport = host_vars.get('ansible_winrm_transport', self._winrm_transport) if isinstance(self._winrm_transport, basestring): self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()] From 647b92a79bfc6db92e84e0da162a9e17d32672f7 Mon Sep 17 00:00:00 2001 From: Joern Heissler Date: Sun, 8 Nov 2015 13:08:44 +0100 Subject: [PATCH 004/590] Use ansible_host in synchronize module Fixes #13073 --- lib/ansible/plugins/action/synchronize.py | 8 +++++++- 1 file 
changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index 2670cc9290..9bf12132ed 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -131,7 +131,13 @@ class ActionModule(ActionBase): src_host = '127.0.0.1' inventory_hostname = task_vars.get('inventory_hostname') dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname) - dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname) + try: + dest_host = dest_host_inventory_vars['ansible_host'] + except KeyError: + try: + dest_host = dest_host_inventory_vars['ansible_ssh_host'] + except KeyError: + dest_host = inventory_hostname dest_is_local = dest_host in C.LOCALHOST From 27398131cf31eb7ca834a30ea2d8a871a937a377 Mon Sep 17 00:00:00 2001 From: Etherdaemon Date: Fri, 6 Nov 2015 09:15:45 +1000 Subject: [PATCH 005/590] Fixes #13010 by updating boto3 methods to update boto3_conn to check for profile_name and if detected, remove from params and set a new variable of profile which is passed into Session separately --- lib/ansible/module_utils/ec2.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index ac799772c2..4cbf2130b1 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -41,20 +41,25 @@ except: def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): + profile = params.pop('profile_name', None) + params['aws_session_token'] = params.pop('security_token', None) + params['verify'] = params.pop('validate_certs', None) + if conn_type not in ['both', 'resource', 'client']: module.fail_json(msg='There is an issue in the code of the module. 
You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call') if conn_type == 'resource': - resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params) + resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params) return resource elif conn_type == 'client': - client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params) + client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params) return client else: - resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params) - client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params) + resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params) + client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params) return client, resource + def aws_common_argument_spec(): return dict( ec2_url=dict(), @@ -153,7 +158,6 @@ def get_aws_connection_info(module, boto3=False): if profile_name: boto_params['profile_name'] = profile_name - else: boto_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key, From 0bc32cbaeea54a0d27ab2654d4d9eb43064cf735 Mon Sep 17 00:00:00 2001 From: Florian Haas Date: Thu, 12 Nov 2015 21:19:40 +0100 Subject: [PATCH 006/590] Correct connection type returned by libvirt_lxc inventory script The correct connection type for LXC containers managed via libvirt is libvirt_lxc, not lxc. 
--- contrib/inventory/libvirt_lxc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py index 1491afd577..cb34d473cd 100755 --- a/contrib/inventory/libvirt_lxc.py +++ b/contrib/inventory/libvirt_lxc.py @@ -27,11 +27,11 @@ result['all'] = {} pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True) result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] result['all']['vars'] = {} -result['all']['vars']['ansible_connection'] = 'lxc' +result['all']['vars']['ansible_connection'] = 'libvirt_lxc' if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({'ansible_connection': 'lxc'})) + print(json.dumps({'ansible_connection': 'libvirt_lxc'})) else: print("Need an argument, either --list or --host ") From 6e9cf88a87d39fbb5e8319dff59bb90820ff6981 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Sat, 14 Nov 2015 09:59:04 +0530 Subject: [PATCH 007/590] The 2.0 release has a name now --- RELEASES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASES.txt b/RELEASES.txt index 035b81dc71..cd32b0cddb 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,7 +4,7 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -2.0 "TBD" - in progress +2.0 "Over the Hills and Far Away" - in progress Released ++++++++ From 85277c8aae4e65690a4acf864822bcae0e7f99ca Mon Sep 17 00:00:00 2001 From: Jimmy Tang Date: Thu, 24 Sep 2015 16:51:51 +0100 Subject: [PATCH 008/590] Initial add of logentries callback plugin This callback plugin will generate json objects to be sent to the logentries service for auditing/debugging purposes. 
To use: Add this to your ansible.cfg file in the defaults block [defaults] callback_plugins = ./callback_plugins callback_stdout = logentries callback_whitelist = logentries Copy the callback plugin into the callback_plugings directory Either set the environment variables export LOGENTRIES_API=data.logentries.com export LOGENTRIES_PORT=10000 export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af Or create a logentries.ini config file that sites next to the plugin with the following contents [logentries] api = data.logentries.com port = 10000 tls_port = 20000 use_tls = no token = dd21fc88-f00a-43ff-b977-e3a4233c53af --- lib/ansible/plugins/callback/logentries.py | 336 +++++++++++++++++++++ 1 file changed, 336 insertions(+) create mode 100644 lib/ansible/plugins/callback/logentries.py diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py new file mode 100644 index 0000000000..3d5346952f --- /dev/null +++ b/lib/ansible/plugins/callback/logentries.py @@ -0,0 +1,336 @@ +""" +(c) 2015, Logentries.com +Author: Jimmy Tang + +This callback plugin will generate json objects to be sent to logentries +for auditing/debugging purposes. + +Todo: + +* Better formatting of output before sending out to logentries data/api nodes. 
+ +To use: + +Add this to your ansible.cfg file in the defaults block + + [defaults] + callback_plugins = ./callback_plugins + callback_stdout = logentries + callback_whitelist = logentries + +Copy the callback plugin into the callback_plugings directory + +Either set the environment variables + + export LOGENTRIES_API=data.logentries.com + export LOGENTRIES_PORT=10000 + export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af + +Or create a logentries.ini config file that sites next to the plugin with the following contents + + [logentries] + api = data.logentries.com + port = 10000 + tls_port = 20000 + use_tls = no + token = dd21fc88-f00a-43ff-b977-e3a4233c53af + + +""" + +import os +import threading +import socket +import random +import time +import codecs +import Queue +import ConfigParser +import uuid +try: + import certifi +except ImportError: + print("please do 'pip install certifi'") + +try: + import flatdict +except ImportError: + print("please do 'pip install flatdict'") + +from ansible.plugins.callback import CallbackBase + + +def to_unicode(ch): + return codecs.unicode_escape_decode(ch)[0] + + +def is_unicode(ch): + return isinstance(ch, unicode) + + +def create_unicode(ch): + return unicode(ch, 'utf-8') + + +class PlainTextSocketAppender(threading.Thread): + def __init__(self, + verbose=True, + LE_API='data.logentries.com', + LE_PORT=80, + LE_TLS_PORT=443): + threading.Thread.__init__(self) + + self.QUEUE_SIZE = 32768 + self.LE_API = LE_API + self.LE_PORT = LE_PORT + self.LE_TLS_PORT = LE_TLS_PORT + self.MIN_DELAY = 0.1 + self.MAX_DELAY = 10 + # Error message displayed when an incorrect Token has been detected + self.INVALID_TOKEN = ("\n\nIt appears the LOGENTRIES_TOKEN " + "parameter you entered is incorrect!\n\n") + # Unicode Line separator character \u2028 + self.LINE_SEP = to_unicode('\u2028') + + self.daemon = True + self.verbose = verbose + self._conn = None + self._queue = Queue.Queue(self.QUEUE_SIZE) + + def empty(self): + return 
self._queue.empty() + + def open_connection(self): + self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._conn.connect((self.LE_API, self.LE_PORT)) + + def reopen_connection(self): + self.close_connection() + + root_delay = self.MIN_DELAY + while True: + try: + self.open_connection() + return + except Exception: + if self.verbose: + print("Unable to connect to Logentries") + + root_delay *= 2 + if (root_delay > self.MAX_DELAY): + root_delay = self.MAX_DELAY + + wait_for = root_delay + random.uniform(0, root_delay) + + try: + time.sleep(wait_for) + except KeyboardInterrupt: + raise + + def close_connection(self): + if self._conn is not None: + self._conn.close() + + def run(self): + try: + # Open connection + self.reopen_connection() + + # Send data in queue + while True: + # Take data from queue + data = self._queue.get(block=True) + + # Replace newlines with Unicode line separator + # for multi-line events + if not is_unicode(data): + multiline = create_unicode(data).replace( + '\n', self.LINE_SEP) + else: + multiline = data.replace('\n', self.LINE_SEP) + multiline += "\n" + # Send data, reconnect if needed + while True: + try: + self._conn.send(multiline.encode('utf-8')) + except socket.error: + self.reopen_connection() + continue + break + except KeyboardInterrupt: + if self.verbose: + print("Logentries asynchronous socket client interrupted") + + self.close_connection() + + +try: + import ssl +except ImportError: # for systems without TLS support. + SocketAppender = PlainTextSocketAppender + print("Unable to import ssl module. 
Will send over port 80.") +else: + + class TLSSocketAppender(PlainTextSocketAppender): + def open_connection(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock = ssl.wrap_socket( + sock=sock, + keyfile=None, + certfile=None, + server_side=False, + cert_reqs=ssl.CERT_REQUIRED, + ssl_version=getattr( + ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1), + ca_certs=certifi.where(), + do_handshake_on_connect=True, + suppress_ragged_eofs=True, ) + sock.connect((self.LE_API, self.LE_TLS_PORT)) + self._conn = sock + + SocketAppender = TLSSocketAppender + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'logentries' + + def __init__(self, display): + super(CallbackModule, self).__init__(display) + + config_path = os.path.abspath(os.path.dirname(__file__)) + config = ConfigParser.ConfigParser() + try: + config.readfp(open(os.path.join(config_path, 'logentries.ini'))) + if config.has_option('logentries', 'api'): + self.api_uri = config.get('logentries', 'api') + if config.has_option('logentries', 'port'): + self.api_port = config.getint('logentries', 'port') + if config.has_option('logentries', 'tls_port'): + self.api_tls_port = config.getint('logentries', 'tls_port') + if config.has_option('logentries', 'use_tls'): + self.use_tls = config.getboolean('logentries', 'use_tls') + if config.has_option('logentries', 'token'): + self.token = config.get('logentries', 'token') + except: + self.api_uri = os.getenv('LOGENTRIES_API') + if self.api_uri is None: + self.api_uri = 'data.logentries.com' + + try: + self.api_port = int(os.getenv('LOGENTRIES_PORT')) + if self.api_port is None: + self.api_port = 80 + except TypeError: + self.api_port = 80 + + try: + self.api_tls_port = int(os.getenv('LOGENTRIES_TLS_PORT')) + if self.api_tls_port is None: + self.api_tls_port = 443 + except TypeError: + self.api_tls_port = 443 + + # this just needs to be set to use TLS + self.use_tls = 
os.getenv('LOGENTRIES_USE_TLS') + if self.use_tls is None: + self.use_tls = False + elif self.use_tls.lower() in ['yes', 'true']: + self.use_tls = True + + self.token = os.getenv('LOGENTRIES_ANSIBLE_TOKEN') + if self.token is None: + self.disabled = True + self._display.warning( + 'Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment variable') + + self.verbose = False + self.timeout = 10 + self.le_jobid = str(uuid.uuid4()) + + if self.use_tls: + self._thread = TLSSocketAppender(verbose=self.verbose, + LE_API=self.api_uri, + LE_TLS_PORT=self.api_tls_port) + else: + self._thread = PlainTextSocketAppender(verbose=self.verbose, + LE_API=self.api_uri, + LE_PORT=self.api_port) + + def emit(self, record): + if not self._thread.is_alive(): + try: + self._thread.start() + if self.verbose: + print("Starting Logentries Asynchronous Socket Appender") + except RuntimeError: # It's already started. + if not self._thread.is_alive(): + raise + + msg = record.rstrip('\n') + msg = "{} {}".format(self.token, msg) + self._thread._queue.put(msg) + + def runner_on_ok(self, host, res): + results = {} + results['le_jobid'] = self.le_jobid + results['hostname'] = host + results['results'] = res + results['status'] = 'OK' + results = flatdict.FlatDict(results) + self.emit(self._dump_results(results)) + + def runner_on_failed(self, host, res, ignore_errors=False): + results = {} + results['le_jobid'] = self.le_jobid + results['hostname'] = host + results['results'] = res + results['status'] = 'FAILED' + results = flatdict.FlatDict(results) + self.emit(self._dump_results(results)) + + def runner_on_skipped(self, host, item=None): + results = {} + results['le_jobid'] = self.le_jobid + results['hostname'] = host + results['status'] = 'SKIPPED' + results = flatdict.FlatDict(results) + self.emit(self._dump_results(results)) + + def runner_on_unreachable(self, host, res): + results = {} + results['le_jobid'] = self.le_jobid + 
results['hostname'] = host + results['results'] = res + results['status'] = 'UNREACHABLE' + results = flatdict.FlatDict(results) + self.emit(self._dump_results(results)) + + def runner_on_async_failed(self, host, res, jid): + results = {} + results['le_jobid'] = self.le_jobid + results['hostname'] = host + results['results'] = res + results['jid'] = jid + results['status'] = 'ASYNC_FAILED' + results = flatdict.FlatDict(results) + self.emit(self._dump_results(results)) + + def v2_playbook_on_play_start(self, play): + results = {} + results['le_jobid'] = self.le_jobid + results['started_by'] = os.getlogin() + if play.name: + results['play'] = play.name + results['hosts'] = play.hosts + results = flatdict.FlatDict(results) + self.emit(self._dump_results(results)) + + def playbook_on_stats(self, stats): + """ flush out queue of messages """ + now = time.time() + while not self._thread.empty(): + time.sleep(0.2) + if time.time() - now > self.timeout: + break From c02ceb8f123c19caefecaa178cef9e2be7ab687a Mon Sep 17 00:00:00 2001 From: Jimmy Tang Date: Wed, 28 Oct 2015 16:31:37 +0000 Subject: [PATCH 009/590] Remove threading and queues. Added license information and cleaned up callback. --- lib/ansible/plugins/callback/logentries.py | 144 ++++++++++----------- 1 file changed, 67 insertions(+), 77 deletions(-) diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py index 3d5346952f..746c9e08ba 100644 --- a/lib/ansible/plugins/callback/logentries.py +++ b/lib/ansible/plugins/callback/logentries.py @@ -1,6 +1,19 @@ -""" -(c) 2015, Logentries.com -Author: Jimmy Tang +""" (c) 2015, Logentries.com, Jimmy Tang + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . This callback plugin will generate json objects to be sent to logentries for auditing/debugging purposes. @@ -18,7 +31,7 @@ Add this to your ansible.cfg file in the defaults block callback_stdout = logentries callback_whitelist = logentries -Copy the callback plugin into the callback_plugings directory +Copy the callback plugin into the callback_plugins directory Either set the environment variables @@ -34,17 +47,16 @@ Or create a logentries.ini config file that sites next to the plugin with the fo tls_port = 20000 use_tls = no token = dd21fc88-f00a-43ff-b977-e3a4233c53af + flatten = False """ import os -import threading import socket import random import time import codecs -import Queue import ConfigParser import uuid try: @@ -72,15 +84,13 @@ def create_unicode(ch): return unicode(ch, 'utf-8') -class PlainTextSocketAppender(threading.Thread): +class PlainTextSocketAppender(object): def __init__(self, verbose=True, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443): - threading.Thread.__init__(self) - self.QUEUE_SIZE = 32768 self.LE_API = LE_API self.LE_PORT = LE_PORT self.LE_TLS_PORT = LE_TLS_PORT @@ -92,13 +102,8 @@ class PlainTextSocketAppender(threading.Thread): # Unicode Line separator character \u2028 self.LINE_SEP = to_unicode('\u2028') - self.daemon = True self.verbose = verbose self._conn = None - self._queue = Queue.Queue(self.QUEUE_SIZE) - - def empty(self): - return self._queue.empty() def open_connection(self): self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -131,35 +136,22 @@ class PlainTextSocketAppender(threading.Thread): if self._conn is not None: self._conn.close() - def 
run(self): - try: - # Open connection - self.reopen_connection() - - # Send data in queue - while True: - # Take data from queue - data = self._queue.get(block=True) - - # Replace newlines with Unicode line separator - # for multi-line events - if not is_unicode(data): - multiline = create_unicode(data).replace( - '\n', self.LINE_SEP) - else: - multiline = data.replace('\n', self.LINE_SEP) - multiline += "\n" - # Send data, reconnect if needed - while True: - try: - self._conn.send(multiline.encode('utf-8')) - except socket.error: - self.reopen_connection() - continue - break - except KeyboardInterrupt: - if self.verbose: - print("Logentries asynchronous socket client interrupted") + def put(self, data): + # Replace newlines with Unicode line separator + # for multi-line events + if not is_unicode(data): + multiline = create_unicode(data).replace('\n', self.LINE_SEP) + else: + multiline = data.replace('\n', self.LINE_SEP) + multiline += "\n" + # Send data, reconnect if needed + while True: + try: + self._conn.send(multiline.encode('utf-8')) + except socket.error: + self.reopen_connection() + continue + break self.close_connection() @@ -213,6 +205,9 @@ class CallbackModule(CallbackBase): self.use_tls = config.getboolean('logentries', 'use_tls') if config.has_option('logentries', 'token'): self.token = config.get('logentries', 'token') + if config.has_option('logentries', 'flatten'): + self.flatten = config.getboolean('logentries', 'flatten') + except: self.api_uri = os.getenv('LOGENTRIES_API') if self.api_uri is None: @@ -245,32 +240,37 @@ class CallbackModule(CallbackBase): self._display.warning( 'Logentries token could not be loaded. 
The logentries token can be provided using the `LOGENTRIES_TOKEN` environment variable') + self.flatten = os.getenv('LOGENTRIES_FLATTEN') + if self.flatten is None: + self.flatten = False + elif self.flatten.lower() in ['yes', 'true']: + self.flatten = True + self.verbose = False self.timeout = 10 self.le_jobid = str(uuid.uuid4()) if self.use_tls: - self._thread = TLSSocketAppender(verbose=self.verbose, - LE_API=self.api_uri, - LE_TLS_PORT=self.api_tls_port) + self._appender = TLSSocketAppender(verbose=self.verbose, + LE_API=self.api_uri, + LE_TLS_PORT=self.api_tls_port) else: - self._thread = PlainTextSocketAppender(verbose=self.verbose, - LE_API=self.api_uri, - LE_PORT=self.api_port) + self._appender = PlainTextSocketAppender(verbose=self.verbose, + LE_API=self.api_uri, + LE_PORT=self.api_port) + self._appender.reopen_connection() + + def emit_formatted(self, record): + if self.flatten: + results = flatdict.FlatDict(record) + self.emit(self._dump_results(results)) + else: + self.emit(self._dump_results(record)) def emit(self, record): - if not self._thread.is_alive(): - try: - self._thread.start() - if self.verbose: - print("Starting Logentries Asynchronous Socket Appender") - except RuntimeError: # It's already started. 
- if not self._thread.is_alive(): - raise - msg = record.rstrip('\n') msg = "{} {}".format(self.token, msg) - self._thread._queue.put(msg) + self._appender.put(msg) def runner_on_ok(self, host, res): results = {} @@ -278,8 +278,7 @@ class CallbackModule(CallbackBase): results['hostname'] = host results['results'] = res results['status'] = 'OK' - results = flatdict.FlatDict(results) - self.emit(self._dump_results(results)) + self.emit_formatted(results) def runner_on_failed(self, host, res, ignore_errors=False): results = {} @@ -287,16 +286,14 @@ class CallbackModule(CallbackBase): results['hostname'] = host results['results'] = res results['status'] = 'FAILED' - results = flatdict.FlatDict(results) - self.emit(self._dump_results(results)) + self.emit_formatted(results) def runner_on_skipped(self, host, item=None): results = {} results['le_jobid'] = self.le_jobid results['hostname'] = host results['status'] = 'SKIPPED' - results = flatdict.FlatDict(results) - self.emit(self._dump_results(results)) + self.emit_formatted(results) def runner_on_unreachable(self, host, res): results = {} @@ -304,8 +301,7 @@ class CallbackModule(CallbackBase): results['hostname'] = host results['results'] = res results['status'] = 'UNREACHABLE' - results = flatdict.FlatDict(results) - self.emit(self._dump_results(results)) + self.emit_formatted(results) def runner_on_async_failed(self, host, res, jid): results = {} @@ -314,8 +310,7 @@ class CallbackModule(CallbackBase): results['results'] = res results['jid'] = jid results['status'] = 'ASYNC_FAILED' - results = flatdict.FlatDict(results) - self.emit(self._dump_results(results)) + self.emit_formatted(results) def v2_playbook_on_play_start(self, play): results = {} @@ -324,13 +319,8 @@ class CallbackModule(CallbackBase): if play.name: results['play'] = play.name results['hosts'] = play.hosts - results = flatdict.FlatDict(results) - self.emit(self._dump_results(results)) + self.emit_formatted(results) def playbook_on_stats(self, stats): - 
""" flush out queue of messages """ - now = time.time() - while not self._thread.empty(): - time.sleep(0.2) - if time.time() - now > self.timeout: - break + """ close connection """ + self._appender.close_connection() From 5f2f5e2b59608cec99a62e15c7a9b4fb5a63a74a Mon Sep 17 00:00:00 2001 From: Jimmy Tang Date: Sat, 14 Nov 2015 08:53:40 +0000 Subject: [PATCH 010/590] Add boilerplate and fix initialisation to match what 2.0 expects --- lib/ansible/plugins/callback/logentries.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py index 746c9e08ba..7195cca6f1 100644 --- a/lib/ansible/plugins/callback/logentries.py +++ b/lib/ansible/plugins/callback/logentries.py @@ -52,6 +52,9 @@ Or create a logentries.ini config file that sites next to the plugin with the fo """ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os import socket import random @@ -188,8 +191,8 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'logentries' - def __init__(self, display): - super(CallbackModule, self).__init__(display) + def __init__(self): + super(CallbackModule, self).__init__() config_path = os.path.abspath(os.path.dirname(__file__)) config = ConfigParser.ConfigParser() From 6cefdfe1483fbf4a872df22d763fafc341a826ac Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Sat, 14 Nov 2015 17:03:41 +0100 Subject: [PATCH 011/590] add test for changing git remote url integration test for https://github.com/ansible/ansible-modules-core/pull/721 clone a repo from one url clone an updated version of that repo from a new url make sure the remote url and the working copy are updated --- .../integration/roles/test_git/tasks/main.yml | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 
831db8ea69..46f6e078ee 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -27,6 +27,8 @@ repo_submodule1: 'https://github.com/abadger/test_submodules_subm1.git' repo_submodule1_newer: 'https://github.com/abadger/test_submodules_subm1_newer.git' repo_submodule2: 'https://github.com/abadger/test_submodules_subm2.git' + repo_update_url_1: 'https://github.com/ansible-test-robinro/git-test-old' + repo_update_url_2: 'https://github.com/ansible-test-robinro/git-test-new' known_host_files: - "{{ lookup('env','HOME') }}/.ssh/known_hosts" - '/etc/ssh/ssh_known_hosts' @@ -346,3 +348,41 @@ - assert: that: '{{ submodule2.stdout_lines|length }} == 4' +# test change of repo url +# see https://github.com/ansible/ansible-modules-core/pull/721 + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: Clone example git repo + git: + repo: '{{ repo_update_url_1 }}' + dest: '{{ checkout_dir }}' + +- name: Clone repo with changed url to the same place + git: + repo: '{{ repo_update_url_2 }}' + dest: '{{ checkout_dir }}' + register: clone2 + +- assert: + that: "clone2|success" + +- name: check url updated + shell: git remote show origin | grep Fetch + register: remote_url + args: + chdir: '{{ checkout_dir }}' + +- assert: + that: + - "'git-test-new' in remote_url.stdout" + - "'git-test-old' not in remote_url.stdout" + +- name: check for new content in git-test-new + stat: path={{ checkout_dir }}/newfilename + register: repo_content + +- name: assert presence of new file in repo (i.e. 
working copy updated) + assert: + that: "repo_content.stat.exists" From 125370ab482a3d0179b2f8a5c473550e17daa4e0 Mon Sep 17 00:00:00 2001 From: Jimmy Tang Date: Sat, 14 Nov 2015 19:46:00 +0000 Subject: [PATCH 012/590] Run when whitelisted --- lib/ansible/plugins/callback/logentries.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py index 7195cca6f1..22980e1e4d 100644 --- a/lib/ansible/plugins/callback/logentries.py +++ b/lib/ansible/plugins/callback/logentries.py @@ -190,6 +190,7 @@ class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'logentries' + CALLBACK_NEEDS_WHITELIST = True def __init__(self): super(CallbackModule, self).__init__() From 19ba54c9fd7f51475101213f0440397e3d673e7f Mon Sep 17 00:00:00 2001 From: Jimmy Tang Date: Sat, 14 Nov 2015 19:53:26 +0000 Subject: [PATCH 013/590] Don't be fatal on import errors so plays don't fail if plugin doesn't have required dependencies --- lib/ansible/plugins/callback/logentries.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py index 22980e1e4d..bf168d68a9 100644 --- a/lib/ansible/plugins/callback/logentries.py +++ b/lib/ansible/plugins/callback/logentries.py @@ -64,13 +64,15 @@ import ConfigParser import uuid try: import certifi + HAS_CERTIFI = True except ImportError: - print("please do 'pip install certifi'") + HAS_CERTIFI = False try: import flatdict + HAS_FLATDICT = True except ImportError: - print("please do 'pip install flatdict'") + HAS_FLATDICT = False from ansible.plugins.callback import CallbackBase @@ -195,6 +197,16 @@ class CallbackModule(CallbackBase): def __init__(self): super(CallbackModule, self).__init__() + if not HAS_CERTIFI: + self.disabled =True + self.display.warning('The `certifi` python module is not installed. 
' + 'Disabling the Logentries callback plugin.') + + if not HAS_FLATDICT: + self.disabled =True + self.display.warning('The `flatdict` python module is not installed. ' + 'Disabling the Logentries callback plugin.') + config_path = os.path.abspath(os.path.dirname(__file__)) config = ConfigParser.ConfigParser() try: From 0dedf444a12af27006181faed036c6feda23a5e1 Mon Sep 17 00:00:00 2001 From: George Brighton Date: Sun, 15 Nov 2015 00:02:46 +0000 Subject: [PATCH 014/590] Removed erroneous apostrophe in installation documentation --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 28bbd69151..e986ffd70f 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -52,7 +52,7 @@ This includes Red Hat, Debian, CentOS, OS X, any of the BSDs, and so on. .. note:: - As of 2.0 ansible uses a few more file handles to manage it's forks, OS X has a very low setting so if you want to use 15 or more forks + As of 2.0 ansible uses a few more file handles to manage its forks, OS X has a very low setting so if you want to use 15 or more forks you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles 1024 2048``. Or just any time you see a "Too many open files" error. From 4f3430ebebc4bfb2bacb76ec505497e9f71ab33d Mon Sep 17 00:00:00 2001 From: Will Thames Date: Sun, 15 Nov 2015 10:45:24 +1000 Subject: [PATCH 015/590] Handle unexpected database presence Check for database presence in a nice way, rather than dropping the database. Thankfully there was a syntax error in the previous version, so no database would have been dropped. There was no check for whether it succeeded or not. 
--- test/integration/roles/test_mysql_db/tasks/main.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_mysql_db/tasks/main.yml b/test/integration/roles/test_mysql_db/tasks/main.yml index a059cd212a..1c2adcce8e 100644 --- a/test/integration/roles/test_mysql_db/tasks/main.yml +++ b/test/integration/roles/test_mysql_db/tasks/main.yml @@ -19,8 +19,9 @@ # ============================================================ - name: make sure the test database is not there - command: mysql "-e drop database '{{db_name}}';" - ignore_errors: True + command: mysql {{db_name}} + register: mysql_db_check + failed_when: "'1049' not in mysql_db_check.stderr" - name: test state=present for a database name (expect changed=true) mysql_db: name={{ db_name }} state=present From 1f052d5ce6e9980c4e41a572cc0992583f033abd Mon Sep 17 00:00:00 2001 From: Sebi Calbaza Date: Sun, 15 Nov 2015 13:48:40 +0200 Subject: [PATCH 016/590] fixed super invocation --- lib/ansible/plugins/lookup/consul_kv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/lookup/consul_kv.py b/lib/ansible/plugins/lookup/consul_kv.py index 47eaa71bc8..27cf3dbef3 100755 --- a/lib/ansible/plugins/lookup/consul_kv.py +++ b/lib/ansible/plugins/lookup/consul_kv.py @@ -75,7 +75,7 @@ class LookupModule(LookupBase): def __init__(self, loader=None, templar=None, **kwargs): - super(LookupBase, self).__init__(loader, templar, **kwargs) + super(LookupModule, self).__init__(loader, templar, **kwargs) self.agent_url = 'http://localhost:8500' if os.getenv('ANSIBLE_CONSUL_URL') is not None: From e174247734f28a26cd4ca9d552fd79baa3b1577f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 15 Nov 2015 07:51:36 -0800 Subject: [PATCH 017/590] evaluate includes for skipped tags cannot evaluate for include tags as underlying tasks might have them, but skips override so this should be a performance boost --- lib/ansible/playbook/block.py | 4 +++- 1 file changed, 
3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 201e881ef4..08e8964a1a 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -340,7 +340,9 @@ class Block(Base, Become, Conditional, Taggable): for task in target: if isinstance(task, Block): tmp_list.append(evaluate_block(task)) - elif task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars): + elif task.action == 'meta' \ + or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=allvars)) \ + or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars): tmp_list.append(task) return tmp_list From eeedaf2cbc82297ed0e7e85a9d285a9deffdccff Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 15 Nov 2015 08:43:42 -0800 Subject: [PATCH 018/590] fixed var name typo --- lib/ansible/playbook/block.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 08e8964a1a..0de5e635e7 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -341,7 +341,7 @@ class Block(Base, Become, Conditional, Taggable): if isinstance(task, Block): tmp_list.append(evaluate_block(task)) elif task.action == 'meta' \ - or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=allvars)) \ + or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=all_vars)) \ or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars): tmp_list.append(task) return tmp_list From f1db99caa75a4742ca5bfcfc0350e9101b92014c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 15 Nov 2015 10:09:24 -0800 Subject: [PATCH 019/590] updated with latest changes to default --- lib/ansible/plugins/callback/skippy.py | 54 +++++++++++++++++--------- 1 file changed, 36 
insertions(+), 18 deletions(-) diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py index 5ef2b6f3c1..15b7d3387c 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ b/lib/ansible/plugins/callback/skippy.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible import constants as C from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): @@ -33,6 +34,7 @@ class CallbackModule(CallbackBase): CALLBACK_NAME = 'skippy' def v2_runner_on_failed(self, result, ignore_errors=False): + delegated_vars = result._result.get('_ansible_delegated_vars', None) if 'exception' in result._result: if self._display.verbosity < 3: # extract just the actual error message from the exception text @@ -49,8 +51,8 @@ class CallbackModule(CallbackBase): if result._task.loop and 'results' in result._result: self._process_items(result) else: - if result._task.delegate_to: - self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), result._task.delegate_to, self._dump_results(result._result)), color='red') + if delegated_vars: + self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') else: self._display.display("fatal: [%s]: FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') @@ -59,18 +61,18 @@ class CallbackModule(CallbackBase): def v2_runner_on_ok(self, result): + delegated_vars = result._result.get('_ansible_delegated_vars', None) if result._task.action == 'include': - msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name) - color = 'cyan' + return elif result._result.get('changed', False): - if result._task.delegate_to is not None: - msg = "changed: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to) + if delegated_vars: + msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "changed: [%s]" % result._host.get_name() color = 'yellow' else: - if result._task.delegate_to is not None: - msg = "ok: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to) + if delegated_vars: + msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "ok: [%s]" % result._host.get_name() color = 'green' @@ -79,16 +81,16 @@ class CallbackModule(CallbackBase): self._process_items(result) else: - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include': + if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % (self._dump_results(result._result),) self._display.display(msg, color=color) self._handle_warnings(result._result) - def v2_runner_on_unreachable(self, result): - if result._task.delegate_to: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), result._task.delegate_to, self._dump_results(result._result)), color='red') + delegated_vars = result._result.get('_ansible_delegated_vars', None) + if delegated_vars: + self._display.display("fatal: [%s -> %s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') else: self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') @@ -126,23 +128,30 @@ class CallbackModule(CallbackBase): def v2_playbook_item_on_ok(self, result): + delegated_vars = result._result.get('_ansible_delegated_vars', None) if result._task.action == 'include': - msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name) - color = 'cyan' + return elif result._result.get('changed', False): - msg = "changed: [%s]" % result._host.get_name() + if delegated_vars: + msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) + else: + msg = "changed: [%s]" % result._host.get_name() color = 'yellow' else: - msg = "ok: [%s]" % result._host.get_name() + if delegated_vars: + msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) + else: + msg = "ok: [%s]" % result._host.get_name() color = 'green' msg += " => (item=%s)" % (result._result['item'],) - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include': + if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) self._display.display(msg, color=color) def v2_playbook_item_on_failed(self, result): + delegated_vars = result._result.get('_ansible_delegated_vars', None) if 'exception' in result._result: if self._display.verbosity < 3: # extract just the actual error message from the exception text @@ -156,6 +165,15 @@ class CallbackModule(CallbackBase): # finally, remove the exception from the result so it's not shown every time del result._result['exception'] - self._display.display("failed: [%s] => 
(item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') + if delegated_vars: + self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red') + else: + self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') + self._handle_warnings(result._result) + def v2_playbook_on_include(self, included_file): + msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) + color = 'cyan' + self._display.display(msg, color='cyan') + From 2c275ee1fcef0aab6cdc4380981696939d54dae3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Nov 2015 11:29:52 -0800 Subject: [PATCH 020/590] updated callback plugins info --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed50896733..e19e830e7e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -285,6 +285,11 @@ newline being stripped you can change your playbook like this: * docker: for talking to docker containers on the ansible controller machine without using ssh. +####New Callbacks: + +* logentries: plugin to send play data to logentries service +* skippy: same as default but does not display skip messages + ###Minor changes: * Many more tests. The new API makes things more testable and we took advantage of it. 
From 83f7942dcf70f981e1dd8b1193418e416f358711 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Nov 2015 11:35:10 -0800 Subject: [PATCH 021/590] removed print entries, corrected display calls to match latest devel --- lib/ansible/plugins/callback/logentries.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py index bf168d68a9..281ca044c5 100644 --- a/lib/ansible/plugins/callback/logentries.py +++ b/lib/ansible/plugins/callback/logentries.py @@ -124,7 +124,7 @@ class PlainTextSocketAppender(object): return except Exception: if self.verbose: - print("Unable to connect to Logentries") + self._display.warning("Unable to connect to Logentries") root_delay *= 2 if (root_delay > self.MAX_DELAY): @@ -163,9 +163,10 @@ class PlainTextSocketAppender(object): try: import ssl + HAS_SSL=True except ImportError: # for systems without TLS support. SocketAppender = PlainTextSocketAppender - print("Unable to import ssl module. Will send over port 80.") + HAS_SSL=False else: class TLSSocketAppender(PlainTextSocketAppender): @@ -197,14 +198,17 @@ class CallbackModule(CallbackBase): def __init__(self): super(CallbackModule, self).__init__() + if not HAS_SSL: + self._display.warning("Unable to import ssl module. Will send over port 80.") + if not HAS_CERTIFI: self.disabled =True - self.display.warning('The `certifi` python module is not installed. ' + self._display.warning('The `certifi` python module is not installed. ' 'Disabling the Logentries callback plugin.') if not HAS_FLATDICT: self.disabled =True - self.display.warning('The `flatdict` python module is not installed. ' + self._display.warning('The `flatdict` python module is not installed. 
' 'Disabling the Logentries callback plugin.') config_path = os.path.abspath(os.path.dirname(__file__)) @@ -253,8 +257,7 @@ class CallbackModule(CallbackBase): self.token = os.getenv('LOGENTRIES_ANSIBLE_TOKEN') if self.token is None: self.disabled = True - self._display.warning( - 'Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment variable') + self._display.warning('Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment variable') self.flatten = os.getenv('LOGENTRIES_FLATTEN') if self.flatten is None: From 89646d425373b67d50c8d8fc37869dbc713a6155 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Nov 2015 14:40:23 -0800 Subject: [PATCH 022/590] no need to be executable --- lib/ansible/plugins/lookup/consul_kv.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 lib/ansible/plugins/lookup/consul_kv.py diff --git a/lib/ansible/plugins/lookup/consul_kv.py b/lib/ansible/plugins/lookup/consul_kv.py old mode 100755 new mode 100644 From 3bb1f7a5612d36a86b23d886c8df2543e338d45f Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Mon, 16 Nov 2015 09:58:23 -0800 Subject: [PATCH 023/590] hostvars should return j2 undefined as instance, not type Looks like someone forgot to create an instance of undefined here- we were returning the undefined type object, which broke all the undefined checks. 
Added an integration test around add_host that will catch this (separate PR to follow) --- lib/ansible/vars/hostvars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index f56f542574..de27984039 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -77,7 +77,7 @@ class HostVars(collections.Mapping): def __getitem__(self, host_name): if host_name not in self._lookup: - return j2undefined + return j2undefined() host = self._lookup.get(host_name) data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False) From 349e072a74f217f65a8921688db6480cc5e14125 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Mon, 16 Nov 2015 10:53:10 -0800 Subject: [PATCH 024/590] fix for dynamic (add_host) hosts not available in hostvars Looks like there are two pattern caches that need to be cleared for this to work- added the second one. Added integration tests for add_host to prevent future regressions. 
--- lib/ansible/inventory/__init__.py | 2 + test/integration/non_destructive.yml | 1 + .../roles/test_add_host/tasks/main.yml | 39 +++++++++++++++++++ 3 files changed, 42 insertions(+) create mode 100644 test/integration/roles/test_add_host/tasks/main.yml diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 4d866587da..fdcbd37e78 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -455,6 +455,8 @@ class Inventory(object): def clear_pattern_cache(self): ''' called exclusively by the add_host plugin to allow patterns to be recalculated ''' + global HOSTS_PATTERNS_CACHE + HOSTS_PATTERNS_CACHE = {} self._pattern_cache = {} def groups_for_host(self, host): diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index 668b20de95..ee30fa2315 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -41,6 +41,7 @@ - { role: test_get_url, tags: test_get_url } - { role: test_embedded_module, tags: test_embedded_module } - { role: test_uri, tags: test_uri } + - { role: test_add_host, tags: test_add_host } # Turn on test_binary when we start testing v2 #- { role: test_binary, tags: test_binary } diff --git a/test/integration/roles/test_add_host/tasks/main.yml b/test/integration/roles/test_add_host/tasks/main.yml new file mode 100644 index 0000000000..cafd6bd4eb --- /dev/null +++ b/test/integration/roles/test_add_host/tasks/main.yml @@ -0,0 +1,39 @@ +# test code for the add_host action +# (c) 2015, Matt Davis + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: add a host to the runtime inventory + add_host: + name: newdynamichost + groups: newdynamicgroup + a_var: from add_host + +- debug: msg={{hostvars['newdynamichost'].group_names}} + +- name: ensure that dynamically-added host is visible via hostvars, groups, etc (there are several caches that could break this) + assert: + that: + - hostvars['bogushost'] is not defined # there was a bug where an undefined host was a "type" instead of an instance- ensure this works before we rely on it + - hostvars['newdynamichost'] is defined + - hostvars['newdynamichost'].group_names is defined + - "'newdynamicgroup' in hostvars['newdynamichost'].group_names" + - hostvars['newdynamichost']['bogusvar'] is not defined + - hostvars['newdynamichost']['a_var'] is defined + - hostvars['newdynamichost']['a_var'] == 'from add_host' + - groups['bogusgroup'] is not defined # same check as above to ensure that bogus groups are undefined... + - groups['newdynamicgroup'] is defined + - "'newdynamichost' in groups['newdynamicgroup']" From fc7e2912f2b7f7c8c2de3c22024a88349827c0b9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 16 Nov 2015 10:37:24 -0800 Subject: [PATCH 025/590] zone connection plugin bugfixes and pipelining and sudo become methods enabled! Thanks to peinheber for helping test and debug this! 
--- lib/ansible/plugins/connection/zone.py | 27 ++++++-------------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/lib/ansible/plugins/connection/zone.py b/lib/ansible/plugins/connection/zone.py index f0001d3c9e..75d7db545d 100644 --- a/lib/ansible/plugins/connection/zone.py +++ b/lib/ansible/plugins/connection/zone.py @@ -45,13 +45,8 @@ class Connection(ConnectionBase): ''' Local zone based connections ''' transport = 'zone' - # Pipelining may work. Someone needs to test by setting this to True and - # having pipelining=True in their ansible.cfg - has_pipelining = False - # Some become_methods may work in v2 (sudo works for other chroot-based - # plugins while su seems to be failing). If some work, check chroot.py to - # see how to disable just some methods. - become_methods = frozenset() + has_pipelining = True + become_methods = frozenset(C.BECOME_METHODS).difference(('su',)) def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -114,13 +109,10 @@ class Connection(ConnectionBase): compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. ''' - # FIXME: previous code took pains not to invoke /bin/sh and left out - # -c. Not sure why as cmd could contain shell metachars (like - # cmd = "mkdir -p $HOME/pathname && echo $HOME/pathname") which - # probably wouldn't work without a shell. Get someone to test that - # this connection plugin works and then we can remove this note - executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' - local_cmd = [self.zlogin_cmd, self.zone, executable, '-c', cmd] + # Note: zlogin invokes a shell (just like ssh does) so we do not pass + # this through /bin/sh -c here. Instead it goes through the shell + # that zlogin selects. 
+ local_cmd = [self.zlogin_cmd, self.zone, cmd] display.vvv("EXEC %s" % (local_cmd), host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, @@ -132,13 +124,6 @@ class Connection(ConnectionBase): ''' run a command on the zone ''' super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - # TODO: Check whether we can send the command to stdin via - # p.communicate(in_data) - # If we can, then we can change this plugin to has_pipelining=True and - # remove the error if in_data is given. - if in_data: - raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) From 7f2cae540518b6edc34213acfe1e38aa30084e25 Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Mon, 16 Nov 2015 16:35:55 -0500 Subject: [PATCH 026/590] add REPLACER_SELINUX back into module_common --- lib/ansible/executor/module_common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py index 152b5dbb37..ba32273256 100644 --- a/lib/ansible/executor/module_common.py +++ b/lib/ansible/executor/module_common.py @@ -39,6 +39,7 @@ REPLACER_WINDOWS = "# POWERSHELL_COMMON" REPLACER_WINARGS = "<>" REPLACER_JSONARGS = "<>" REPLACER_VERSION = "\"<>\"" +REPLACER_SELINUX = "<>" # We could end up writing out parameters with unicode characters so we need to # specify an encoding for the python source file @@ -172,6 +173,7 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args) module_data = module_data.replace(REPLACER_WINARGS, module_args_json) module_data = module_data.replace(REPLACER_JSONARGS, module_args_json) + module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS)) if module_style == 'new': facility = C.DEFAULT_SYSLOG_FACILITY @@ -200,4 
+202,3 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal module_data = b"\n".join(lines) return (module_data, module_style, shebang) - From 1f34c6b214b1f9d792e99a619b04603b83743765 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 17 Nov 2015 12:05:07 -0800 Subject: [PATCH 027/590] updated yaml syntax and gotchas specifically added example for getting strings that match boolean values --- docsite/rst/YAMLSyntax.rst | 55 +++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 76683f6ba3..ea3593d6fd 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -20,52 +20,52 @@ Each item in the list is a list of key/value pairs, commonly called a "hash" or a "dictionary". So, we need to know how to write lists and dictionaries in YAML. -There's another small quirk to YAML. All YAML files (regardless of their association with -Ansible or not) should begin with ``---``. This is part of the YAML -format and indicates the start of a document. +There's another small quirk to YAML. All YAML files (regardless of their association with Ansible or not) can optionally +begin with ``---`` and end with ``...``. This is part of the YAML format and indicates the start and end of a document. -All members of a list are lines beginning at the same indentation level starting -with a ``"- "`` (a dash and a space):: +All members of a list are lines beginning at the same indentation level starting with a ``"- "`` (a dash and a space):: --- # A list of tasty fruits - - Apple - - Orange - - Strawberry - - Mango + fruits: + - Apple + - Orange + - Strawberry + - Mango + ... 
A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space):: - --- # An employee record - name: Example Developer - job: Developer - skill: Elite + - martin: + name: Martin D'vloper + job: Developer + skill: Elite -Dictionaries can also be represented in an abbreviated form if you really want to:: +Dictionaries and lists can also be represented in an abbreviated form if you really want to:: --- - # An employee record - {name: Example Developer, job: Developer, skill: Elite} + employees: + - martin: {name: Martin D'vloper, job: Developer, skill: Elite} + fruits: ['Apple', 'Orange', 'Strawberry', 'Mango'] .. _truthiness: -Ansible doesn't really use these too much, but you can also specify a -boolean value (true/false) in several forms:: +Ansible doesn't really use these too much, but you can also specify a boolean value (true/false) in several forms:: - --- create_key: yes needs_agent: no knows_oop: True likes_emacs: TRUE uses_cvs: false -Let's combine what we learned so far in an arbitrary YAML example. This really -has nothing to do with Ansible, but will give you a feel for the format:: + +Let's combine what we learned so far in an arbitrary YAML example. +This really has nothing to do with Ansible, but will give you a feel for the format:: --- # An employee record - name: Example Developer + name: Martin D'vloper job: Developer skill: Elite employed: True @@ -79,8 +79,7 @@ has nothing to do with Ansible, but will give you a feel for the format:: python: Elite dotnet: Lame -That's all you really need to know about YAML to start writing -`Ansible` playbooks. +That's all you really need to know about YAML to start writing `Ansible` playbooks. Gotchas ------- @@ -100,6 +99,14 @@ with a "{", YAML will think it is a dictionary, so you must quote it, like so:: foo: "{{ variable }}" +The same applies for strings that start or contain any YAML special characters `` [] {} : > | `` . 
+ +Boolean conversion is helpful, but this can be a problem when you want a literal `yes` or other boolean values as a string. +In these cases just use quotes:: + + non_boolean: "yes" + other_string: "False" + .. seealso:: From 5cbeab5a3cbd55a7252c94c74c57ca69aade7846 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 16 Nov 2015 16:12:57 -0500 Subject: [PATCH 028/590] Performance improvements for HostVars and some bugfixes --- lib/ansible/executor/process/result.py | 6 +- lib/ansible/executor/process/worker.py | 15 +++-- lib/ansible/executor/task_queue_manager.py | 38 +++++++++-- lib/ansible/plugins/strategy/__init__.py | 14 ++-- lib/ansible/vars/__init__.py | 18 ++--- lib/ansible/vars/hostvars.py | 78 ++++++++-------------- 6 files changed, 85 insertions(+), 84 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 2d13aa44cd..cdc8875631 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -58,7 +58,7 @@ class ResultProcess(multiprocessing.Process): def _send_result(self, result): debug(u"sending result: %s" % ([text_type(x) for x in result],)) - self._final_q.put(result, block=False) + self._final_q.put(result) debug("done sending result") def _read_worker_result(self): @@ -73,7 +73,7 @@ class ResultProcess(multiprocessing.Process): try: if not rslt_q.empty(): debug("worker %d has data to read" % self._cur_worker) - result = rslt_q.get(block=False) + result = rslt_q.get() debug("got a result from worker %d: %s" % (self._cur_worker, result)) break except queue.Empty: @@ -101,7 +101,7 @@ class ResultProcess(multiprocessing.Process): try: result = self._read_worker_result() if result is None: - time.sleep(0.01) + time.sleep(0.0001) continue clean_copy = strip_internal_keys(result._result) diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index 1cc1f7df43..a1a83a5dda 100644 --- 
a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -59,11 +59,13 @@ class WorkerProcess(multiprocessing.Process): for reading later. ''' - def __init__(self, tqm, main_q, rslt_q, loader): + def __init__(self, tqm, main_q, rslt_q, hostvars_manager, loader): + super(WorkerProcess, self).__init__() # takes a task queue manager as the sole param: self._main_q = main_q self._rslt_q = rslt_q + self._hostvars = hostvars_manager self._loader = loader # dupe stdin, if we have one @@ -82,8 +84,6 @@ class WorkerProcess(multiprocessing.Process): # couldn't get stdin's fileno, so we just carry on pass - super(WorkerProcess, self).__init__() - def run(self): ''' Called when the process is started, and loops indefinitely @@ -100,14 +100,15 @@ class WorkerProcess(multiprocessing.Process): while True: task = None try: - debug("waiting for a message...") - (host, task, basedir, zip_vars, hostvars, compressed_vars, play_context, shared_loader_obj) = self._main_q.get() + #debug("waiting for work") + (host, task, basedir, zip_vars, compressed_vars, play_context, shared_loader_obj) = self._main_q.get(block=False) if compressed_vars: job_vars = json.loads(zlib.decompress(zip_vars)) else: job_vars = zip_vars - job_vars['hostvars'] = hostvars + + job_vars['hostvars'] = self._hostvars.hostvars() debug("there's work to be done! 
got a task/handler to work on: %s" % task) @@ -142,7 +143,7 @@ class WorkerProcess(multiprocessing.Process): debug("done sending task result") except queue.Empty: - pass + time.sleep(0.0001) except AnsibleConnectionFailure: try: if task: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 001d71e9e0..3e62cb3c99 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from multiprocessing.managers import SyncManager, DictProxy import multiprocessing import os import tempfile @@ -32,6 +33,7 @@ from ansible.executor.stats import AggregateStats from ansible.playbook.play_context import PlayContext from ansible.plugins import callback_loader, strategy_loader, module_loader from ansible.template import Templar +from ansible.vars.hostvars import HostVars try: from __main__ import display @@ -98,7 +100,7 @@ class TaskQueueManager: main_q = multiprocessing.Queue() rslt_q = multiprocessing.Queue() - prc = WorkerProcess(self, main_q, rslt_q, self._loader) + prc = WorkerProcess(self, main_q, rslt_q, self._hostvars_manager, self._loader) prc.start() self._workers.append((prc, main_q, rslt_q)) @@ -173,11 +175,6 @@ class TaskQueueManager: are done with the current task). 
''' - # Fork # of forks, # of hosts or serial, whichever is lowest - contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(play.hosts))] - contenders = [ v for v in contenders if v is not None and v > 0 ] - self._initialize_processes(min(contenders)) - if not self._callbacks_loaded: self.load_callbacks() @@ -187,6 +184,34 @@ class TaskQueueManager: new_play = play.copy() new_play.post_validate(templar) + class HostVarsManager(SyncManager): + pass + + hostvars = HostVars( + play=new_play, + inventory=self._inventory, + variable_manager=self._variable_manager, + loader=self._loader, + ) + + HostVarsManager.register( + 'hostvars', + callable=lambda: hostvars, + # FIXME: this is the list of exposed methods to the DictProxy object, plus our + # one special one (set_variable_manager). There's probably a better way + # to do this with a proper BaseProxy/DictProxy derivative + exposed=('set_variable_manager', '__contains__', '__delitem__', '__getitem__', + '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', + 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'), + ) + self._hostvars_manager = HostVarsManager() + self._hostvars_manager.start() + + # Fork # of forks, # of hosts or serial, whichever is lowest + contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))] + contenders = [ v for v in contenders if v is not None and v > 0 ] + self._initialize_processes(min(contenders)) + play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno()) for callback_plugin in self._callback_plugins: if hasattr(callback_plugin, 'set_play_context'): @@ -221,6 +246,7 @@ class TaskQueueManager: # and run the play using the strategy and cleanup on way out play_return = strategy.run(iterator, play_context) self._cleanup_processes() + self._hostvars_manager.shutdown() return play_return def cleanup(self): diff --git a/lib/ansible/plugins/strategy/__init__.py 
b/lib/ansible/plugins/strategy/__init__.py index 3cdec5b573..f1f4650529 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -158,7 +158,6 @@ class StrategyBase: # hostvars out of the task variables right now, due to the fact # that they're not JSON serializable compressed_vars = False - hostvars = task_vars.pop('hostvars', None) if C.DEFAULT_VAR_COMPRESSION_LEVEL > 0: zip_vars = zlib.compress(json.dumps(task_vars), C.DEFAULT_VAR_COMPRESSION_LEVEL) compressed_vars = True @@ -170,10 +169,7 @@ class StrategyBase: zip_vars = task_vars # noqa (pyflakes false positive because task_vars is deleted in the conditional above) # and queue the task - main_q.put((host, task, self._loader.get_basedir(), zip_vars, hostvars, compressed_vars, play_context, shared_loader_obj), block=False) - - # nuke the hostvars object too, as its no longer needed - del hostvars + main_q.put((host, task, self._loader.get_basedir(), zip_vars, compressed_vars, play_context, shared_loader_obj)) self._pending_results += 1 except (EOFError, IOError, AssertionError) as e: @@ -192,7 +188,7 @@ class StrategyBase: while not self._final_q.empty() and not self._tqm._terminated: try: - result = self._final_q.get(block=False) + result = self._final_q.get() display.debug("got result from result worker: %s" % ([text_type(x) for x in result],)) # all host status messages contain 2 entries: (msg, task_result) @@ -277,6 +273,7 @@ class StrategyBase: var_value = wrap_var(result[3]) self._variable_manager.set_nonpersistent_facts(host, {var_name: var_value}) + self._tqm._hostvars_manager.hostvars().set_variable_manager(self._variable_manager) elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] @@ -307,11 +304,12 @@ class StrategyBase: self._variable_manager.set_nonpersistent_facts(target_host, facts) else: self._variable_manager.set_host_facts(target_host, facts) + self._tqm._hostvars_manager.hostvars().set_variable_manager(self._variable_manager) 
else: raise AnsibleError("unknown result message received: %s" % result[0]) except Queue.Empty: - time.sleep(0.01) + time.sleep(0.0001) return ret_results @@ -327,7 +325,7 @@ class StrategyBase: while self._pending_results > 0 and not self._tqm._terminated: results = self._process_pending_results(iterator) ret_results.extend(results) - time.sleep(0.01) + time.sleep(0.0001) display.debug("no more pending results, returning what we have") return ret_results diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 26f52adfb0..c895b59f5f 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -359,15 +359,15 @@ class VariableManager: for (group_name, group) in iteritems(self._inventory.groups): variables['groups'][group_name] = [h.name for h in group.get_hosts()] - if include_hostvars: - hostvars_cache_entry = self._get_cache_entry(play=play) - if hostvars_cache_entry in HOSTVARS_CACHE: - hostvars = HOSTVARS_CACHE[hostvars_cache_entry] - else: - hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self) - HOSTVARS_CACHE[hostvars_cache_entry] = hostvars - variables['hostvars'] = hostvars - variables['vars'] = hostvars[host.get_name()] + #if include_hostvars: + # hostvars_cache_entry = self._get_cache_entry(play=play) + # if hostvars_cache_entry in HOSTVARS_CACHE: + # hostvars = HOSTVARS_CACHE[hostvars_cache_entry] + # else: + # hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self) + # HOSTVARS_CACHE[hostvars_cache_entry] = hostvars + # variables['hostvars'] = hostvars + # variables['vars'] = hostvars[host.get_name()] if play: variables['role_names'] = [r._role_name for r in play.roles] diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index de27984039..246b2c7812 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -48,74 +48,50 @@ class HostVars(collections.Mapping): def __init__(self, play, 
inventory, variable_manager, loader): self._lookup = dict() + self._inventory = inventory self._loader = loader self._play = play self._variable_manager = variable_manager self._cached_result = dict() - hosts = inventory.get_hosts(ignore_limits_and_restrictions=True) + def set_variable_manager(self, variable_manager): + self._variable_manager = variable_manager - # check to see if localhost is in the hosts list, as we - # may have it referenced via hostvars but if created implicitly - # it doesn't sow up in the hosts list - has_localhost = False - for host in hosts: - if host.name in C.LOCALHOST: - has_localhost = True - break - - if not has_localhost: - new_host = Host(name='localhost') - new_host.set_variable("ansible_python_interpreter", sys.executable) - new_host.set_variable("ansible_connection", "local") - new_host.address = '127.0.0.1' - hosts.append(new_host) - - for host in hosts: - self._lookup[host.name] = host + def _find_host(self, host_name): + return self._inventory.get_host(host_name) def __getitem__(self, host_name): + host = self._find_host(host_name) + if host is None: + return j2undefined - if host_name not in self._lookup: - return j2undefined() - - host = self._lookup.get(host_name) data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False) + #**************************************************** + # TESTING REMOVAL OF THIS + #**************************************************** + # Since we template much later now in 2.0, it may be completely unrequired to do + # a full template of the vars returned above, which is quite costly in time when + # the result is large. 
# Using cache in order to avoid template call - sha1_hash = sha1(str(data).encode('utf-8')).hexdigest() - if sha1_hash in self._cached_result: - result = self._cached_result[sha1_hash] - else: - templar = Templar(variables=data, loader=self._loader) - result = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS) - self._cached_result[sha1_hash] = result - return result + #sha1_hash = sha1(str(data).encode('utf-8')).hexdigest() + #if sha1_hash in self._cached_result: + # result = self._cached_result[sha1_hash] + #else: + # templar = Templar(variables=data, loader=self._loader) + # result = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS) + # self._cached_result[sha1_hash] = result + #return result + #**************************************************** + return data def __contains__(self, host_name): - item = self.get(host_name) - if item and item is not j2undefined: - return True - return False + return self._find_host(host_name) is not None def __iter__(self): - for host in self._lookup: + for host in self._inventory.get_hosts(ignore_limits_and_restrictions=True): yield host def __len__(self): - return len(self._lookup) + return len(self._inventory.get_hosts(ignore_limits_and_restrictions=True)) - def __getstate__(self): - return dict( - loader=self._loader, - lookup=self._lookup, - play=self._play, - var_manager=self._variable_manager, - ) - - def __setstate__(self, data): - self._play = data.get('play') - self._loader = data.get('loader') - self._lookup = data.get('lookup') - self._variable_manager = data.get('var_manager') - self._cached_result = dict() From 984729016e3ea20e27c60fa5110cfca19580370f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 16 Nov 2015 17:13:55 -0500 Subject: [PATCH 029/590] Fix tag filtering on included files and add more debugging Previously, we were filtering the task list on tags for each host that was including the file, based on the idea that the variables had to include the host 
information. However, the top level task filtering is play-context only, which should also apply to the included tasks. Tags cannot and should not be based on hostvars. --- lib/ansible/playbook/included_file.py | 6 ++++++ lib/ansible/plugins/strategy/__init__.py | 2 ++ lib/ansible/plugins/strategy/free.py | 19 ++++++++++++------- lib/ansible/plugins/strategy/linear.py | 24 +++++++++++++++++++++--- 4 files changed, 41 insertions(+), 10 deletions(-) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index 6fc3bd5cbf..b7c0fb8175 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -24,6 +24,12 @@ import os from ansible.errors import AnsibleError from ansible.template import Templar +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + class IncludedFile: def __init__(self, filename, args, task): diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index f1f4650529..38c65552d3 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -414,6 +414,7 @@ class StrategyBase: Loads an included YAML file of tasks, applying the optional set of variables. 
''' + display.debug("loading included file: %s" % included_file._filename) try: data = self._loader.load_from_file(included_file._filename) if data is None: @@ -474,6 +475,7 @@ class StrategyBase: # finally, send the callback and return the list of blocks loaded self._tqm.send_callback('v2_playbook_on_include', included_file) + display.debug("done processing included file") return block_list def run_handlers(self, iterator, play_context): diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index 2d3c184a8c..e83184891d 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -156,13 +156,18 @@ class StrategyModule(StrategyBase): display.warning(str(e)) continue - for host in hosts_left: - if host in included_file._hosts: - task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task) - final_blocks = [] - for new_block in new_blocks: - final_blocks.append(new_block.filter_tagged_tasks(play_context, task_vars)) - iterator.add_tasks(host, final_blocks) + display.debug("generating all_blocks data") + all_blocks = dict((host, []) for host in hosts_left) + display.debug("done generating all_blocks data") + for new_block in new_blocks: + task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=included_file._task) + final_block = new_block.filter_tagged_tasks(play_context, task_vars) + for host in hosts_left: + if host in included_file._hosts: + all_blocks[host].append(final_block) + + for host in hosts_left: + iterator.add_tasks(host, all_blocks[host]) # pause briefly so we don't spin lock time.sleep(0.05) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 65240ef8fa..8a8d5c084a 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -264,31 +264,44 @@ class StrategyModule(StrategyBase): return False if 
len(included_files) > 0: + display.debug("we have included files to process") noop_task = Task() noop_task.action = 'meta' noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) + display.debug("generating all_blocks data") all_blocks = dict((host, []) for host in hosts_left) + display.debug("done generating all_blocks data") for included_file in included_files: + display.debug("processing included file: %s" % included_file._filename) # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step try: new_blocks = self._load_included_file(included_file, iterator=iterator) + display.debug("iterating over new_blocks loaded from include file") for new_block in new_blocks: + task_vars = self._variable_manager.get_vars( + loader=self._loader, + play=iterator._play, + task=included_file._task, + ) + display.debug("filtering new block on tags") + final_block = new_block.filter_tagged_tasks(play_context, task_vars) + display.debug("done filtering new block on tags") + noop_block = Block(parent_block=task._block) noop_block.block = [noop_task for t in new_block.block] noop_block.always = [noop_task for t in new_block.always] noop_block.rescue = [noop_task for t in new_block.rescue] + for host in hosts_left: if host in included_file._hosts: - task_vars = self._variable_manager.get_vars(loader=self._loader, - play=iterator._play, host=host, task=included_file._task) - final_block = new_block.filter_tagged_tasks(play_context, task_vars) all_blocks[host].append(final_block) else: all_blocks[host].append(noop_block) + display.debug("done iterating over new_blocks loaded from include file") except AnsibleError as e: for host in included_file._hosts: @@ -299,9 +312,14 @@ class StrategyModule(StrategyBase): # finally go through all of the hosts and append the # accumulated blocks to their list of tasks + display.debug("extending task lists for all hosts with included 
blocks") + for host in hosts_left: iterator.add_tasks(host, all_blocks[host]) + display.debug("done extending task lists") + display.debug("done processing included files") + display.debug("results queue empty") except (IOError, EOFError) as e: display.debug("got IOError/EOFError in task loop: %s" % e) From 90f99f29aee79fd0e28c9f91b2e29bdcbbd8849b Mon Sep 17 00:00:00 2001 From: Brano Zarnovican Date: Fri, 13 Nov 2015 13:22:21 +0100 Subject: [PATCH 030/590] unittest cover for 'run_command' All os.*, subprocess.* is mocked to avoid side-effects. Tests are skipped in Py3, since 'run_command' is not Py3-ready, yet. --- .../module_utils/basic/test_run_command.py | 179 ++++++++++++++++++ 1 file changed, 179 insertions(+) create mode 100644 test/units/module_utils/basic/test_run_command.py diff --git a/test/units/module_utils/basic/test_run_command.py b/test/units/module_utils/basic/test_run_command.py new file mode 100644 index 0000000000..09ab14b6d2 --- /dev/null +++ b/test/units/module_utils/basic/test_run_command.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division) +__metaclass__ = type + +import errno +import sys +import time + +from ansible.compat.tests import unittest +from ansible.compat.six import StringIO, BytesIO +from ansible.compat.tests.mock import call, MagicMock, Mock, patch, sentinel + +from ansible.module_utils import basic +from ansible.module_utils.basic import AnsibleModule + +class OpenStringIO(StringIO): + """StringIO with dummy close() method + + So that you can inspect the content after close() was called. + """ + + def close(self): + pass + +@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") +class TestAnsibleModuleRunCommand(unittest.TestCase): + + def setUp(self): + + self.cmd_out = { + # os.read() is returning 'bytes', not strings + sentinel.stdout: BytesIO(), + sentinel.stderr: BytesIO(), + } + + def mock_os_read(fd, nbytes): + return self.cmd_out[fd].read(nbytes) + + def mock_select(rlist, wlist, xlist, timeout=1): + return (rlist, [], []) + + def mock_os_chdir(path): + if path == '/inaccessible': + raise OSError(errno.EPERM, "Permission denied: '/inaccessible'") + + basic.MODULE_COMPLEX_ARGS = '{}' + self.module = AnsibleModule(argument_spec=dict()) + self.module.fail_json = MagicMock(side_effect=SystemExit) + + self.os = patch('ansible.module_utils.basic.os').start() + self.os.path.expandvars.side_effect = lambda x: x + self.os.path.expanduser.side_effect = lambda x: x + self.os.environ = {'PATH': '/bin'} + self.os.getcwd.return_value = '/home/foo' + self.os.path.isdir.return_value = True + self.os.chdir.side_effect = mock_os_chdir + self.os.read.side_effect = mock_os_read + + self.subprocess = patch('ansible.module_utils.basic.subprocess').start() + self.cmd = Mock() + self.cmd.returncode = 0 + self.cmd.stdin = OpenStringIO() + self.cmd.stdout.fileno.return_value = sentinel.stdout + self.cmd.stderr.fileno.return_value = sentinel.stderr + self.subprocess.Popen.return_value = 
self.cmd + + self.select = patch('ansible.module_utils.basic.select').start() + self.select.select.side_effect = mock_select + + self.addCleanup(patch.stopall) + + def test_list_as_args(self): + self.module.run_command(['/bin/ls', 'a', ' b', 'c ']) + self.assertTrue(self.subprocess.Popen.called) + args, kwargs = self.subprocess.Popen.call_args + self.assertEqual(args, (['/bin/ls', 'a', ' b', 'c '], )) + self.assertEqual(kwargs['shell'], False) + + def test_str_as_args(self): + self.module.run_command('/bin/ls a " b" "c "') + self.assertTrue(self.subprocess.Popen.called) + args, kwargs = self.subprocess.Popen.call_args + self.assertEqual(args, (['/bin/ls', 'a', ' b', 'c '], )) + self.assertEqual(kwargs['shell'], False) + + def test_tuple_as_args(self): + self.assertRaises(SystemExit, self.module.run_command, ('ls', '/')) + self.assertTrue(self.module.fail_json.called) + + def test_unsafe_shell(self): + self.module.run_command('ls a " b" "c "', use_unsafe_shell=True) + self.assertTrue(self.subprocess.Popen.called) + args, kwargs = self.subprocess.Popen.call_args + self.assertEqual(args, ('ls a " b" "c "', )) + self.assertEqual(kwargs['shell'], True) + + def test_path_prefix(self): + self.module.run_command('foo', path_prefix='/opt/bin') + self.assertEqual('/opt/bin', self.os.environ['PATH'].split(':')[0]) + + def test_cwd(self): + self.os.getcwd.return_value = '/old' + self.module.run_command('/bin/ls', cwd='/new') + self.assertEqual(self.os.chdir.mock_calls, + [call('/new'), call('/old'), ]) + + def test_cwd_not_a_dir(self): + self.os.getcwd.return_value = '/old' + self.os.path.isdir.side_effect = lambda d: d != '/not-a-dir' + self.module.run_command('/bin/ls', cwd='/not-a-dir') + self.assertEqual(self.os.chdir.mock_calls, [call('/old'), ]) + + def test_cwd_inaccessible(self): + self.assertRaises(SystemExit, self.module.run_command, '/bin/ls', cwd='/inaccessible') + self.assertTrue(self.module.fail_json.called) + args, kwargs = self.module.fail_json.call_args + 
self.assertEqual(kwargs['rc'], errno.EPERM) + + def test_prompt_bad_regex(self): + self.assertRaises(SystemExit, self.module.run_command, 'foo', prompt_regex='[pP)assword:') + self.assertTrue(self.module.fail_json.called) + + def test_prompt_no_match(self): + self.cmd_out[sentinel.stdout] = BytesIO(b'hello') + (rc, _, _) = self.module.run_command('foo', prompt_regex='[pP]assword:') + self.assertEqual(rc, 0) + + def test_prompt_match_wo_data(self): + self.cmd_out[sentinel.stdout] = BytesIO(b'Authentication required!\nEnter password: ') + (rc, _, _) = self.module.run_command('foo', prompt_regex=r'[pP]assword:', data=None) + self.assertEqual(rc, 257) + + def test_check_rc_false(self): + self.cmd.returncode = 1 + (rc, _, _) = self.module.run_command('/bin/false', check_rc=False) + self.assertEqual(rc, 1) + + def test_check_rc_true(self): + self.cmd.returncode = 1 + self.assertRaises(SystemExit, self.module.run_command, '/bin/false', check_rc=True) + self.assertTrue(self.module.fail_json.called) + args, kwargs = self.module.fail_json.call_args + self.assertEqual(kwargs['rc'], 1) + + def test_text_stdin(self): + (rc, stdout, stderr) = self.module.run_command('/bin/foo', data='hello world') + self.assertEqual(self.cmd.stdin.getvalue(), 'hello world\n') + + def test_ascii_stdout(self): + self.cmd_out[sentinel.stdout] = BytesIO(b'hello') + (rc, stdout, stderr) = self.module.run_command('/bin/cat hello.txt') + self.assertEqual(rc, 0) + self.assertEqual(stdout, 'hello') + + def test_utf8_output(self): + self.cmd_out[sentinel.stdout] = BytesIO(u'Žarn§'.encode('utf-8')) + self.cmd_out[sentinel.stderr] = BytesIO(u'لرئيسية'.encode('utf-8')) + (rc, stdout, stderr) = self.module.run_command('/bin/something_ugly') + self.assertEqual(rc, 0) + self.assertEqual(stdout.decode('utf-8'), u'Žarn§') + self.assertEqual(stderr.decode('utf-8'), u'لرئيسية') + From 9f31c073fe00dbdbfff99756bb7877e00d6079de Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 17 Nov 2015 09:15:10 -0500 
Subject: [PATCH 031/590] Fixing a few bugs in the HostVars performance areas * Also refresh inventory in the HostVars manager process when things are changed via add_host/group_by * Raise j2undefined rather than return it --- lib/ansible/executor/task_queue_manager.py | 10 ++++++---- lib/ansible/plugins/strategy/__init__.py | 2 ++ lib/ansible/vars/__init__.py | 2 +- lib/ansible/vars/hostvars.py | 5 ++++- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 3e62cb3c99..0f0dba0444 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -198,11 +198,13 @@ class TaskQueueManager: 'hostvars', callable=lambda: hostvars, # FIXME: this is the list of exposed methods to the DictProxy object, plus our - # one special one (set_variable_manager). There's probably a better way + # special ones (set_variable_manager/set_inventory). There's probably a better way # to do this with a proper BaseProxy/DictProxy derivative - exposed=('set_variable_manager', '__contains__', '__delitem__', '__getitem__', - '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', - 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'), + exposed=( + 'set_variable_manager', 'set_inventory', '__contains__', '__delitem__', + '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', + 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' + ), ) self._hostvars_manager = HostVarsManager() self._hostvars_manager.start() diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 38c65552d3..405a8c7b3a 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -246,11 +246,13 @@ class StrategyBase: new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) + 
self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'add_group': host = result[1] result_item = result[2] self._add_group(host, result_item) + self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'notify_handler': task_result = result[1] diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index c895b59f5f..ea9c38e0b9 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -119,7 +119,7 @@ class VariableManager: self._host_vars_files = data.get('host_vars_files', defaultdict(dict)) self._group_vars_files = data.get('group_vars_files', defaultdict(dict)) self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()) - self._inventory = None + self._inventory = data.get('inventory', None) def _get_cache_entry(self, play=None, host=None, task=None): play_id = "NONE" diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index 246b2c7812..742d3266e7 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -57,13 +57,16 @@ class HostVars(collections.Mapping): def set_variable_manager(self, variable_manager): self._variable_manager = variable_manager + def set_inventory(self, inventory): + self._inventory = inventory + def _find_host(self, host_name): return self._inventory.get_host(host_name) def __getitem__(self, host_name): host = self._find_host(host_name) if host is None: - return j2undefined + raise j2undefined data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False) From f10d2c57c825789516f99cf94a88477ef9029e4a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 17 Nov 2015 10:19:56 -0500 Subject: [PATCH 032/590] Restoring templating of hostvars returned by __getitem__ --- lib/ansible/vars/hostvars.py | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git 
a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index 742d3266e7..130bc3f1e3 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -70,23 +70,14 @@ class HostVars(collections.Mapping): data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False) - #**************************************************** - # TESTING REMOVAL OF THIS - #**************************************************** - # Since we template much later now in 2.0, it may be completely unrequired to do - # a full template of the vars returned above, which is quite costly in time when - # the result is large. - # Using cache in order to avoid template call - #sha1_hash = sha1(str(data).encode('utf-8')).hexdigest() - #if sha1_hash in self._cached_result: - # result = self._cached_result[sha1_hash] - #else: - # templar = Templar(variables=data, loader=self._loader) - # result = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS) - # self._cached_result[sha1_hash] = result - #return result - #**************************************************** - return data + sha1_hash = sha1(str(data).encode('utf-8')).hexdigest() + if sha1_hash in self._cached_result: + result = self._cached_result[sha1_hash] + else: + templar = Templar(variables=data, loader=self._loader) + result = templar.template(data, fail_on_undefined=False, static_vars=STATIC_VARS) + self._cached_result[sha1_hash] = result + return result def __contains__(self, host_name): return self._find_host(host_name) is not None From d35f615af8c112d69de0886de5ebbcdca407ab8a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 18 Nov 2015 11:17:43 -0800 Subject: [PATCH 033/590] added more debug info for command results --- lib/ansible/plugins/action/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 64e9a18fa7..cde1c8c9ce 100644 --- 
a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -506,7 +506,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): display.debug("executing the command %s through the connection" % cmd) rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.debug("command execution done") + display.debug("command execution done: rc=%s" % (rc)) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type @@ -524,6 +524,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): else: err = stderr + display.debug("stdout=%s, stderr=%s" % (stdout, stderr)) display.debug("done with _low_level_execute_command() (%s)" % (cmd,)) if rc is None: rc = 0 From 25807f5404dd0e8ebcaff9356238a1555b16bce8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 17 Nov 2015 14:22:13 -0500 Subject: [PATCH 034/590] Don't modify things we've put on the queue Fixes #12937 --- lib/ansible/executor/process/result.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index cdc8875631..77967dd9f4 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -142,8 +142,6 @@ class ResultProcess(multiprocessing.Process): # notifies all other threads for notify in result_item['_ansible_notify']: self._send_result(('notify_handler', result, notify)) - # now remove the notify field from the results, as its no longer needed - result_item.pop('_ansible_notify') if 'add_host' in result_item: # this task added a new host (add_host module) From 180159b01da925e46440b48bc62f750fc555bee8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 17 Nov 2015 14:44:46 -0500 Subject: [PATCH 035/590] Adding vars back in and trying to add a little more speed by avoiding copies --- lib/ansible/vars/__init__.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff 
--git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index ea9c38e0b9..aed29150e6 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -101,13 +101,14 @@ class VariableManager: def __getstate__(self): data = dict( - fact_cache = self._fact_cache.copy(), - np_fact_cache = self._nonpersistent_fact_cache.copy(), - vars_cache = self._vars_cache.copy(), - extra_vars = self._extra_vars.copy(), - host_vars_files = self._host_vars_files.copy(), - group_vars_files = self._group_vars_files.copy(), + fact_cache = self._fact_cache, + np_fact_cache = self._nonpersistent_fact_cache, + vars_cache = self._vars_cache, + extra_vars = self._extra_vars, + host_vars_files = self._host_vars_files, + group_vars_files = self._group_vars_files, omit_token = self._omit_token, + #inventory = self._inventory, ) return data @@ -258,6 +259,8 @@ class VariableManager: except KeyError: pass + all_vars['vars'] = all_vars.copy() + if play: all_vars = combine_vars(all_vars, play.get_vars()) From 782aa9a7fd3da661747e4d54bd75bb18feabac40 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Nov 2015 12:09:46 -0800 Subject: [PATCH 036/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 572771d0b1..b1c0249045 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 572771d0b1eb6d94ea9a596b7a719d3a2d0b651b +Subproject commit b1c02490452cb92db9cb5cc18de232e5b599210d diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e5362cc76a..7da1f8d4ca 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e5362cc76a25a734ddacf4d8ac496d9127c4a46d +Subproject commit 7da1f8d4ca3ab8b00e0b3a056d8ba03a4d2bf3a4 From 9b9fb51d9db9472c8f3dee34e23ab0277420bfcf Mon Sep 17 00:00:00 2001 From: James 
Cammarata Date: Tue, 17 Nov 2015 15:37:18 -0500 Subject: [PATCH 037/590] Template the final_environment value in _compute_environment_string() Fixes #13123 --- lib/ansible/plugins/action/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index cde1c8c9ce..124321e67b 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -160,6 +160,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): # these environment settings should not need to merge sub-dicts final_environment.update(environment) + final_environment = self._templar.template(final_environment) return self._connection._shell.env_prefix(**final_environment) def _early_needs_tmp_path(self): From 4d024fc82aba0a2964b7bfa80d93017ba116e810 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 17 Nov 2015 15:05:13 -0800 Subject: [PATCH 038/590] added os_project module --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e19e830e7e..d8e741fb41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -174,6 +174,7 @@ newline being stripped you can change your playbook like this: * openstack: os_nova_flavor * openstack: os_object * openstack: os_port +* openstack: os_project * openstack: os_router * openstack: os_security_group * openstack: os_security_group_rule From 0127d326522c031ba862def4839b0f14fb014e0f Mon Sep 17 00:00:00 2001 From: Matteo Acerbi Date: Wed, 18 Nov 2015 11:20:34 +0100 Subject: [PATCH 039/590] Fix DataLoader's docstring DataLoader.__init__ doesn't take an argument named vault_password --- lib/ansible/parsing/dataloader.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py index aaa878bb5f..c54ba78f1f 100644 --- a/lib/ansible/parsing/dataloader.py +++ b/lib/ansible/parsing/dataloader.py @@ -52,9 +52,7 @@ class DataLoader(): Usage: dl = 
DataLoader() - (or) - dl = DataLoader(vault_password='foo') - + # optionally: dl.set_vault_password('foo') ds = dl.load('...') ds = dl.load_from_file('/path/to/file') ''' From 0821d251c8878990326ffee567ab2a5baafd089a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guido=20G=C3=BCnther?= Date: Wed, 18 Nov 2015 18:25:43 +0100 Subject: [PATCH 040/590] Add integration tests for zypper Modeled after the yum tests but also tests local package installations as fixed with PR#1256. This depends on PRs #1256, #1261 and #1262 in ansible-modules-extra. --- test/integration/destructive.yml | 1 + .../roles/test_zypper/files/empty.spec | 12 ++ .../roles/test_zypper/meta/main.yml | 2 + .../roles/test_zypper/tasks/main.yml | 26 +++ .../roles/test_zypper/tasks/zypper.yml | 194 ++++++++++++++++++ 5 files changed, 235 insertions(+) create mode 100644 test/integration/roles/test_zypper/files/empty.spec create mode 100644 test/integration/roles/test_zypper/meta/main.yml create mode 100644 test/integration/roles/test_zypper/tasks/main.yml create mode 100644 test/integration/roles/test_zypper/tasks/zypper.yml diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index b8f56d113b..626124d14f 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -18,3 +18,4 @@ - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} - { role: test_docker, tags: test_docker} + - { role: test_zypper, tags: test_zypper} diff --git a/test/integration/roles/test_zypper/files/empty.spec b/test/integration/roles/test_zypper/files/empty.spec new file mode 100644 index 0000000000..044ea3a548 --- /dev/null +++ b/test/integration/roles/test_zypper/files/empty.spec @@ -0,0 +1,12 @@ +Summary: Empty RPM +Name: empty +Version: 1 +Release: 0 +License: GPLv3 +Group: Applications/System +BuildArch: noarch + +%description +Empty RPM + +%files diff --git a/test/integration/roles/test_zypper/meta/main.yml 
b/test/integration/roles/test_zypper/meta/main.yml new file mode 100644 index 0000000000..07faa21776 --- /dev/null +++ b/test/integration/roles/test_zypper/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/roles/test_zypper/tasks/main.yml b/test/integration/roles/test_zypper/tasks/main.yml new file mode 100644 index 0000000000..5ecdb8684b --- /dev/null +++ b/test/integration/roles/test_zypper/tasks/main.yml @@ -0,0 +1,26 @@ +# test code for the zyppe module +# +# (c) 2015, Guido Günther +# +# heavily based on the yum tests which are +# +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- include: 'zypper.yml' + when: ansible_distribution in ['SLES', 'openSUSE'] + diff --git a/test/integration/roles/test_zypper/tasks/zypper.yml b/test/integration/roles/test_zypper/tasks/zypper.yml new file mode 100644 index 0000000000..8ae04576c8 --- /dev/null +++ b/test/integration/roles/test_zypper/tasks/zypper.yml @@ -0,0 +1,194 @@ +# UNINSTALL +- name: uninstall hello + zypper: name=hello state=removed + register: zypper_result + +- name: check hello with rpm + shell: rpm -q hello + failed_when: False + register: rpm_result + +- debug: var=zypper_result +- debug: var=rpm_result + +- name: verify uninstallation of hello + assert: + that: + - "zypper_result.rc == 0" + - "rpm_result.rc == 1" + +# UNINSTALL AGAIN +- name: uninstall hello again + zypper: name=hello state=removed + register: zypper_result + +- name: verify no change on re-uninstall + assert: + that: + - "not zypper_result.changed" + +# INSTALL +- name: install hello + zypper: name=hello state=present + register: zypper_result + +- name: check hello with rpm + shell: rpm -q hello + failed_when: False + register: rpm_result + +- debug: var=zypper_result +- debug: var=rpm_result + +- name: verify installation of hello + assert: + that: + - "zypper_result.rc == 0" + - "zypper_result.changed" + - "rpm_result.rc == 0" + +# INSTALL AGAIN +- name: install hello again + zypper: name=hello state=present + register: zypper_result + +- name: verify no change on second install + assert: + that: + - "not zypper_result.changed" + +# Multiple packages +- name: uninstall hello and metamail + zypper: + name: + - hello + - metamail + state: removed + register: zypper_result + +- name: check hello with rpm + shell: rpm -q hello + failed_when: False + register: rpm_hello_result + +- name: check metamail with rpm + shell: rpm -q metamail + failed_when: False + register: rpm_metamail_result + +- name: verify packages uninstalled + assert: + that: + - "rpm_hello_result.rc != 0" + - "rpm_metamail_result.rc != 0" + +- 
name: install hello and metamail + zypper: + name: + - hello + - metamail + state: present + register: zypper_result + +- name: check hello with rpm + shell: rpm -q hello + failed_when: False + register: rpm_hello_result + +- name: check metamail with rpm + shell: rpm -q metamail + failed_when: False + register: rpm_metamail_result + +- name: verify packages installed + assert: + that: + - "zypper_result.rc == 0" + - "zypper_result.changed" + - "rpm_hello_result.rc == 0" + - "rpm_metamail_result.rc == 0" + +- name: uninstall hello and metamail + zypper: + name: + - hello + - metamail + state: removed + +# INSTALL nonexistent package +- name: install hello from url + zypper: name=doesnotexist state=present + register: zypper_result + ignore_errors: yes + +- name: verify package installation failed + assert: + that: + - "zypper_result.rc == 104" + - "zypper_result.msg.startswith('No provider of')" + +# INSTALL broken local package +- name: create directory + file: + path: "{{output_dir | expanduser}}/zypper" + state: directory + +- name: fake rpm package + file: + path: "{{output_dir | expanduser}}/zypper/broken.rpm" + state: touch + +- name: install broken rpm + zypper: + name="{{output_dir | expanduser}}/zypper/broken.rpm" + state=present + register: zypper_result + ignore_errors: yes + +- debug: var=zypper_result + +- name: verify we failed installation of broken rpm + assert: + that: + - "zypper_result.rc == 1" + - "'broken.rpm: not an rpm package' in zypper_result.msg" + +# Build and install an empty rpm +- name: copy spec file + copy: + src: empty.spec + dest: "{{ output_dir | expanduser }}/zypper/empty.spec" + +- name: build rpm + command: | + rpmbuild -bb \ + --define "_topdir {{output_dir | expanduser }}/zypper/rpm-build" + --define "_builddir %{_topdir}" \ + --define "_rpmdir %{_topdir}" \ + --define "_srcrpmdir %{_topdir}" \ + --define "_specdir {{output_dir | expanduser}}/zypper" \ + --define "_sourcedir %{_topdir}" \ + {{ output_dir }}/zypper/empty.spec 
+ register: rpm_build_result + +- name: install empty rpm + zypper: + name: "{{ output_dir | expanduser }}/zypper/rpm-build/noarch/empty-1-0.noarch.rpm" + register: zypper_result + +- name: check empty with rpm + shell: rpm -q empty + failed_when: False + register: rpm_result + +- name: verify installation of empty + assert: + that: + - "zypper_result.rc == 0" + - "zypper_result.changed" + - "rpm_result.rc == 0" + +- name: uninstall empry + zypper: + name: empty + state: removed From 549163170fe34c3f1bc066ca69137522133b16b1 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Fri, 23 Oct 2015 14:37:50 -0700 Subject: [PATCH 041/590] fast winrm put_file without size restrictions --- lib/ansible/plugins/connection/winrm.py | 137 ++++++++++++++++++------ 1 file changed, 102 insertions(+), 35 deletions(-) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index 5ae83adab3..5e75da1dc9 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -24,6 +24,8 @@ import os import re import shlex import traceback +import json +import xmltodict from ansible.compat.six.moves.urllib.parse import urlunsplit @@ -44,6 +46,7 @@ except ImportError: from ansible.errors import AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.hashing import secure_hash from ansible.utils.path import makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode, to_str from ansible.utils.vars import combine_vars @@ -151,7 +154,21 @@ class Connection(ConnectionBase): else: raise AnsibleError('No transport found for WinRM connection') - def _winrm_exec(self, command, args=(), from_exec=False): + def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False): + rq = {'env:Envelope': protocol._get_soap_header( + resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', + action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send', + 
shell_id=shell_id)} + stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\ + .setdefault('rsp:Stream', {}) + stream['@Name'] = 'stdin' + stream['@CommandId'] = command_id + stream['#text'] = base64.b64encode(to_bytes(stdin)) + if eof: + stream['@End'] = 'true' + rs = protocol.send_message(xmltodict.unparse(rq)) + + def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None): if from_exec: display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) else: @@ -162,7 +179,19 @@ class Connection(ConnectionBase): self.shell_id = self.protocol.open_shell(codepage=65001) # UTF-8 command_id = None try: - command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args)) + command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator == None)) + + # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that comes from this) + try: + if stdin_iterator: + for (data, is_last) in stdin_iterator: + self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last) + except: + # TODO: set/propagate an error flag, but don't throw (or include the command output in the exception) + pass + + # NB: this could hang if the receiver is still running (eg, network failed a Send request but the server's still happy). + # Consider adding pywinrm status check/abort operations to see if the target is still running after a failure. response = Response(self.protocol.get_command_output(self.shell_id, command_id)) if from_exec: display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host) @@ -212,45 +241,83 @@ class Connection(ConnectionBase): result.std_err = to_bytes(result.std_err) return (result.status_code, result.std_out, result.std_err) + # FUTURE: determine buffer size at runtime via remote winrm config? 
+ def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000): + in_size = os.path.getsize(in_path) + offset = 0 + with open(in_path, 'rb') as in_file: + for out_data in iter((lambda:in_file.read(buffer_size)), ''): + offset += len(out_data) + self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host) + # yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded + b64_data = base64.b64encode(out_data) + '\r\n' + # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal + yield b64_data, (in_file.tell() == in_size) + + if offset == 0: # empty file, return an empty buffer + eof to close it + yield "", True + def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) out_path = self._shell._unquote(out_path) display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host) if not os.path.exists(in_path): raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path) - with open(in_path) as in_file: - in_size = os.path.getsize(in_path) - script_template = ''' - $s = [System.IO.File]::OpenWrite("%s"); - [void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin); - $b = [System.Convert]::FromBase64String("%s"); - [void]$s.Write($b, 0, $b.length); - [void]$s.SetLength(%d); - [void]$s.Close(); - ''' - # Determine max size of data we can pass per command. - script = script_template % (self._shell._escape(out_path), in_size, '', in_size) - cmd = self._shell._encode_script(script) - # Encode script with no data, subtract its length from 8190 (max - # windows command length), divide by 2.67 (UTF16LE base64 command - # encoding), then by 1.35 again (data base64 encoding). 
- buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35) - for offset in xrange(0, in_size or 1, buffer_size): - try: - out_data = in_file.read(buffer_size) - if offset == 0: - if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'): - out_path = out_path + '.ps1' - b64_data = base64.b64encode(out_data) - script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size) - display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host) - cmd_parts = self._shell._encode_script(script, as_list=True) - result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) - if result.status_code != 0: - raise IOError(to_str(result.std_err)) - except Exception: - traceback.print_exc() - raise AnsibleError('failed to transfer file to "%s"' % out_path) + + script_template = ''' + begin {{ + $path = "{0}" + + $DebugPreference = "Continue" + $ErrorActionPreference = "Stop" + Set-StrictMode -Version 2 + + $fd = [System.IO.File]::Create($path) + + $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create() + + $bytes = @() #initialize for empty file case + }} + process {{ + $bytes = [System.Convert]::FromBase64String($input) + $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null + $fd.Write($bytes, 0, $bytes.Length) + }} + end {{ + $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null + + $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant() + + $fd.Close() + + Write-Output "{{""sha1"":""$hash""}}" + }} + ''' + + # FUTURE: this sucks- why can't the module/shell stuff do this? 
+ with open(in_path, 'r') as temp_file: + if temp_file.read(15).lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'): + out_path = out_path + '.ps1' + + script = script_template.format(self._shell._escape(out_path)) + cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False) + + result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path)) + # TODO: improve error handling + if result.status_code != 0: + raise IOError(to_str(result.std_err)) + + put_output = json.loads(result.std_out) + remote_sha1 = put_output.get("sha1") + + if not remote_sha1: + raise IOError("Remote sha1 was not returned") + + local_sha1 = secure_hash(in_path) + + if not remote_sha1 == local_sha1: + raise IOError("Remote sha1 hash {0} does not match local hash {1}".format(remote_sha1, local_sha1)) + def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) From 9dcfbb1130119084e3faa180d06dd2351e38b5d0 Mon Sep 17 00:00:00 2001 From: Codey Oxley Date: Wed, 18 Nov 2015 14:01:30 -0800 Subject: [PATCH 042/590] Removed version argument from argparse This was breaking standalone execution and isn't needed --- contrib/inventory/nsot.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/contrib/inventory/nsot.py b/contrib/inventory/nsot.py index 611e3cc0d0..0ca1625df3 100644 --- a/contrib/inventory/nsot.py +++ b/contrib/inventory/nsot.py @@ -148,9 +148,6 @@ from pynsot.client import get_api_client from pynsot.app import HttpServerError from click.exceptions import UsageError -# Version source of truth is in setup.py -__version__ = pkg_resources.require('ansible_nsot')[0].version - def warning(*objs): print("WARNING: ", *objs, file=sys.stderr) @@ -299,7 +296,6 @@ def parse_args(): # Establish parser with options and error out if no action provided parser = argparse.ArgumentParser( description=desc, - version=__version__, conflict_handler='resolve', ) From 
f8ed99e5e4c64d10c5f870ec99994ad0bd794ed1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Nov 2015 11:10:09 -0800 Subject: [PATCH 043/590] Revert "success should not include skipped" This reverts commit 300ee227a2e6d8017b9c9b34cf56702a827407f5. --- lib/ansible/plugins/test/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py index 06fa687e24..1bd789260f 100644 --- a/lib/ansible/plugins/test/core.py +++ b/lib/ansible/plugins/test/core.py @@ -36,7 +36,7 @@ def failed(*a, **kw): def success(*a, **kw): ''' Test if task result yields success ''' - return not failed(*a, **kw) and not skipped(*a, **kw) + return not failed(*a, **kw) def changed(*a, **kw): ''' Test if task result yields changed ''' From dea07438b5843d6de76e4c6b64e644c67c60c41d Mon Sep 17 00:00:00 2001 From: Chris Church Date: Wed, 18 Nov 2015 17:49:21 -0500 Subject: [PATCH 044/590] Recommend using pywinrm >= 0.1.1 from PyPI instead of GitHub version. 
--- docsite/rst/intro_windows.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 64cd64b885..e5cbb94faf 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -26,7 +26,7 @@ Installing on the Control Machine On a Linux control machine:: - pip install https://github.com/diyan/pywinrm/archive/master.zip#egg=pywinrm + pip install "pywinrm>=0.1.1" Active Directory Support ++++++++++++++++++++++++ From 05b542eb8e28b85da22281476fea3b8625b37715 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Nov 2015 14:31:55 -0800 Subject: [PATCH 045/590] added nsot inventory plugin --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d8e741fb41..a85cef0790 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -267,6 +267,7 @@ newline being stripped you can change your playbook like this: * fleetctl * openvz * nagios_ndo +* nsot * proxmox * rudder * serf From 3d1255d19009aa084fd2635917916fd9396ea898 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 18 Nov 2015 19:12:38 -0500 Subject: [PATCH 046/590] Don't update job vars too early when getting loop items in TaskExecutor Fixes #13113 --- lib/ansible/executor/task_executor.py | 28 +++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 2dcb5f9631..48ba2f2414 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -153,16 +153,19 @@ class TaskExecutor: and returns the items result. 
''' - # create a copy of the job vars here so that we can modify - # them temporarily without changing them too early for other - # parts of the code that might still need a pristine version - #vars_copy = self._job_vars.copy() - vars_copy = self._job_vars + # save the play context variables to a temporary dictionary, + # so that we can modify the job vars without doing a full copy + # and later restore them to avoid modifying things too early + play_context_vars = dict() + self._play_context.update_vars(play_context_vars) - # now we update them with the play context vars - self._play_context.update_vars(vars_copy) + old_vars = dict() + for k in play_context_vars.keys(): + if k in self._job_vars: + old_vars[k] = self._job_vars[k] + self._job_vars[k] = play_context_vars[k] - templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy) + templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars) items = None if self._task.loop: if self._task.loop in self._shared_loader_obj.lookup_loader: @@ -189,6 +192,15 @@ class TaskExecutor: else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) + # now we restore any old job variables that may have been modified, + # and delete them if they were in the play context vars but not in + # the old variables dictionary + for k in play_context_vars.keys(): + if k in old_vars: + self._job_vars[k] = old_vars[k] + else: + del self._job_vars[k] + if items: from ansible.vars.unsafe_proxy import UnsafeProxy for idx, item in enumerate(items): From db83c0e7cdac8e86026eeae5314185c667e6da1c Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 18 Nov 2015 23:09:16 -0800 Subject: [PATCH 047/590] winrm error handling tweaks --- lib/ansible/plugins/connection/winrm.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git 
a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index 5e75da1dc9..aa0c7b35e5 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -179,6 +179,7 @@ class Connection(ConnectionBase): self.shell_id = self.protocol.open_shell(codepage=65001) # UTF-8 command_id = None try: + stdin_push_failed = False command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator == None)) # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that comes from this) @@ -187,11 +188,10 @@ class Connection(ConnectionBase): for (data, is_last) in stdin_iterator: self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last) except: - # TODO: set/propagate an error flag, but don't throw (or include the command output in the exception) - pass + stdin_push_failed = True # NB: this could hang if the receiver is still running (eg, network failed a Send request but the server's still happy). - # Consider adding pywinrm status check/abort operations to see if the target is still running after a failure. + # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure. 
response = Response(self.protocol.get_command_output(self.shell_id, command_id)) if from_exec: display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host) @@ -199,6 +199,10 @@ class Connection(ConnectionBase): display.vvvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host) display.vvvvvv('WINRM STDOUT %s' % to_unicode(response.std_out), host=self._winrm_host) display.vvvvvv('WINRM STDERR %s' % to_unicode(response.std_err), host=self._winrm_host) + + if stdin_push_failed: + raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err)) + return response finally: if command_id: @@ -305,18 +309,18 @@ class Connection(ConnectionBase): result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path)) # TODO: improve error handling if result.status_code != 0: - raise IOError(to_str(result.std_err)) + raise AnsibleError(to_str(result.std_err)) put_output = json.loads(result.std_out) remote_sha1 = put_output.get("sha1") if not remote_sha1: - raise IOError("Remote sha1 was not returned") + raise AnsibleError("Remote sha1 was not returned") local_sha1 = secure_hash(in_path) if not remote_sha1 == local_sha1: - raise IOError("Remote sha1 hash {0} does not match local hash {1}".format(remote_sha1, local_sha1)) + raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(remote_sha1, local_sha1)) def fetch_file(self, in_path, out_path): From 66347c9449486a63d9be3f5e003e2aac2d8578ff Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Thu, 19 Nov 2015 09:21:06 +0200 Subject: [PATCH 048/590] Fix NameError when using loops vars_copy disappeared in 3d1255d19009aa084fd2635917916fd9396ea898. Fixes #13213. 
--- lib/ansible/executor/task_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 48ba2f2414..fb1e57d4ce 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -188,7 +188,7 @@ class TaskExecutor: else: raise items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, - templar=templar).run(terms=loop_terms, variables=vars_copy) + templar=templar).run(terms=loop_terms, variables=self._job_vars) else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) From 78e4f176e61673b4f52ad952e96ed431dd4b7148 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 19 Nov 2015 08:29:15 -0500 Subject: [PATCH 049/590] Return skipped/failed async results directly Fixes #13205 --- lib/ansible/executor/task_executor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index fb1e57d4ce..27e11bed79 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -405,6 +405,8 @@ class TaskExecutor: # the async_wrapper module returns dumped JSON via its stdout # response, so we parse it here and replace the result try: + if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']: + return result result = json.loads(result.get('stdout')) except (TypeError, ValueError) as e: return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e)) From 91500f8f5f0def49120daa941d139c3809843f9d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 19 Nov 2015 09:01:51 -0500 Subject: [PATCH 050/590] Fix include param precedence in variable manager --- lib/ansible/playbook/task.py | 8 ++++++++ lib/ansible/vars/__init__.py | 6 ++++++ test/units/vars/test_variable_manager.py | 1 + 3 files 
changed, 15 insertions(+) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 38cca4b3a2..4f326b628b 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -272,6 +272,14 @@ class Task(Base, Conditional, Taggable, Become): return all_vars + def get_include_params(self): + all_vars = dict() + if self._task_include: + all_vars.update(self._task_include.get_include_params()) + if self.action == 'include': + all_vars.update(self.vars) + return all_vars + def copy(self, exclude_block=False): new_me = super(Task, self).copy() diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index aed29150e6..5819f0547f 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -319,6 +319,12 @@ class VariableManager: all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict())) all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict())) + # special case for include tasks, where the include params + # may be specified in the vars field for the task, which should + # have higher precedence than the vars/np facts above + if task: + all_vars = combine_vars(all_vars, task.get_include_params()) + all_vars = combine_vars(all_vars, self._extra_vars) all_vars = combine_vars(all_vars, magic_variables) diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py index acd8e5c898..41f08138a5 100644 --- a/test/units/vars/test_variable_manager.py +++ b/test/units/vars/test_variable_manager.py @@ -173,6 +173,7 @@ class TestVariableManager(unittest.TestCase): mock_task._role = None mock_task.loop = None mock_task.get_vars.return_value = dict(foo="bar") + mock_task.get_include_params.return_value = dict() v = VariableManager() self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar") From 8a0d2e0ef2ac238fd167cb67f8d20d4baed9eeff Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 
19 Nov 2015 09:08:49 -0500 Subject: [PATCH 051/590] Submodule pointer update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b1c0249045..88e0bfd75d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b1c02490452cb92db9cb5cc18de232e5b599210d +Subproject commit 88e0bfd75df9de563f9991b3dab7aebfbf8a9bf3 From c86120cea60049ae5c0dc5213cf1ace62515bf58 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 19 Nov 2015 09:39:37 -0800 Subject: [PATCH 052/590] Fix non-module plugins picking up files that did not end in .py. This was caused by accessing the cache using the passed in mod_type rather than the suffix that we calculate with knowledge of whether this is a module or non-module plugin. --- lib/ansible/plugins/__init__.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 87de300e3c..139e5a7d61 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -213,15 +213,6 @@ class PluginLoader: def find_plugin(self, name, mod_type=''): ''' Find a plugin named name ''' - # The particular cache to look for modules within. This matches the - # requested mod_type - pull_cache = self._plugin_path_cache[mod_type] - try: - return pull_cache[name] - except KeyError: - # Cache miss. Now let's find the plugin - pass - if mod_type: suffix = mod_type elif self.class_name: @@ -232,6 +223,15 @@ class PluginLoader: # they can have any suffix suffix = '' + # The particular cache to look for modules within. This matches the + # requested mod_type + pull_cache = self._plugin_path_cache[suffix] + try: + return pull_cache[name] + except KeyError: + # Cache miss. Now let's find the plugin + pass + # TODO: Instead of using the self._paths cache (PATH_CACHE) and # self._searched_paths we could use an iterator. 
Before enabling that # we need to make sure we don't want to add additional directories From d75e707af595d99863540b6c5f8462b2c242f0e8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 19 Nov 2015 09:55:06 -0800 Subject: [PATCH 053/590] Simplify code a little --- lib/ansible/plugins/action/synchronize.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index 9bf12132ed..45004d5ed4 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -134,10 +134,7 @@ class ActionModule(ActionBase): try: dest_host = dest_host_inventory_vars['ansible_host'] except KeyError: - try: - dest_host = dest_host_inventory_vars['ansible_ssh_host'] - except KeyError: - dest_host = inventory_hostname + dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname) dest_is_local = dest_host in C.LOCALHOST From 5fc64deffeccb333e5659e48b1610923d211dc84 Mon Sep 17 00:00:00 2001 From: Brano Zarnovican Date: Fri, 20 Nov 2015 12:59:08 +0100 Subject: [PATCH 054/590] test_hg fix: remove reference to "head" ERROR! error while evaluating conditional: head.stat.isreg This is remnant from earlier change 50e5d81777e6228cc90b982c111e5f51e78e965d which removed stat on head file.. 
--- test/integration/roles/test_hg/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/roles/test_hg/tasks/main.yml b/test/integration/roles/test_hg/tasks/main.yml index c6d6f70b06..4eee22e4c7 100644 --- a/test/integration/roles/test_hg/tasks/main.yml +++ b/test/integration/roles/test_hg/tasks/main.yml @@ -66,7 +66,6 @@ assert: that: - "tags.stat.isreg" - - "head.stat.isreg" - "branches.stat.isreg" - name: verify on a reclone things are marked unchanged From a30a52ef528a9728ceae4970282ea6f326ce293c Mon Sep 17 00:00:00 2001 From: Brano Zarnovican Date: Fri, 20 Nov 2015 13:36:55 +0100 Subject: [PATCH 055/590] test_svn fix: remove hardcoded "~/ansible_testing/svn" path --- test/integration/roles/test_subversion/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_subversion/tasks/main.yml b/test/integration/roles/test_subversion/tasks/main.yml index 8b28688a9e..d15d63ab02 100644 --- a/test/integration/roles/test_subversion/tasks/main.yml +++ b/test/integration/roles/test_subversion/tasks/main.yml @@ -36,7 +36,7 @@ - debug: var=subverted -- shell: ls ~/ansible_testing/svn +- shell: ls {{ checkout_dir }} # FIXME: the before/after logic here should be fixed to make them hashes, see GitHub 6078 # looks like this: { From 591c81e95f5bb759c57124bcea46617310fbcbc0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Nov 2015 07:45:00 -0800 Subject: [PATCH 056/590] Docker cp sets file ownership to root:root so we can't use it. 
Fixes #13219 --- lib/ansible/plugins/connection/docker.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index 308dea0edc..4e08f56a09 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -80,8 +80,10 @@ class Connection(ConnectionBase): docker_version = self._get_docker_version() if LooseVersion(docker_version) < LooseVersion('1.3'): raise AnsibleError('docker connection type requires docker 1.3 or higher') - if LooseVersion(docker_version) >= LooseVersion('1.8.0'): - self.can_copy_bothways = True + # Docker cp in 1.8.0 sets the owner and group to root rather than the + # user that the docker container is set to use by default. + #if LooseVersion(docker_version) >= LooseVersion('1.8.0'): + # self.can_copy_bothways = True @staticmethod def _sanitize_version(version): @@ -93,7 +95,7 @@ class Connection(ConnectionBase): cmd_output = subprocess.check_output(cmd) for line in cmd_output.split('\n'): - if line.startswith('Server version:'): # old docker versions + if line.startswith('Server version:'): # old docker versions return self._sanitize_version(line.split()[2]) # no result yet, must be newer Docker version From 087dbc1ed50516391f30589d58f00627e9288b0b Mon Sep 17 00:00:00 2001 From: Sebastien Couture Date: Fri, 20 Nov 2015 12:58:50 -0500 Subject: [PATCH 057/590] We should give pipes.quote() a string every time --- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 124321e67b..fcbd1e2c21 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -409,7 +409,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): # the remote system, which can be read and parsed by the module args_data = "" for k,v in iteritems(module_args): - 
args_data += '%s="%s" ' % (k, pipes.quote(v)) + args_data += '%s="%s" ' % (k, pipes.quote(text_type(v))) self._transfer_data(args_file_path, args_data) display.debug("done transferring module to remote") From 664c7980a297d760949e9b97be36ab2659b164a1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Nov 2015 12:42:44 -0800 Subject: [PATCH 058/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 88e0bfd75d..f474261696 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 88e0bfd75df9de563f9991b3dab7aebfbf8a9bf3 +Subproject commit f474261696266f47676b71fad34f48fb04e358ed diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 7da1f8d4ca..99088afd47 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 7da1f8d4ca3ab8b00e0b3a056d8ba03a4d2bf3a4 +Subproject commit 99088afd4740a106764688facdcbbf03d3ba94c9 From a9b55e341a33c646c25e53cfd6fa8b68ff212c06 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Nov 2015 12:43:30 -0800 Subject: [PATCH 059/590] Add docker_login module to the changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a85cef0790..8a18f6d43a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -154,6 +154,7 @@ newline being stripped you can change your playbook like this: * datadog_monitor * deploy_helper * dpkg_selections +* docker: docker_login * elasticsearch_plugin * expect * find From 77c83fd520e221f644797a393ccc604be4510c92 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Nov 2015 12:46:22 -0800 Subject: [PATCH 060/590] Commit submodule refs to the devel HEAD --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/lib/ansible/modules/core b/lib/ansible/modules/core index f474261696..1ff0704c0c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit f474261696266f47676b71fad34f48fb04e358ed +Subproject commit 1ff0704c0c6dbc88e08f19620d7325aa5790d40f diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 99088afd47..19374903ac 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 99088afd4740a106764688facdcbbf03d3ba94c9 +Subproject commit 19374903ac679ee100a146f0615b92517020193b From dc60e08aa046931d6b6fffe4b6dc3afa5e8a21f8 Mon Sep 17 00:00:00 2001 From: Gilles Cornu Date: Thu, 25 Dec 2014 01:37:47 +0100 Subject: [PATCH 061/590] Documentation: Update the Vagrant Guide This is an attempt to solve #7665. Revert the change applied by f56a6e0951af9ae68106a8f9569cc375f53d5b52 (#12310), as the inventory generated by Vagrant still rely on the legacy `_ssh` setting names for backwards compatibility reasons. See also https://github.com/mitchellh/vagrant/issues/6570 --- docsite/rst/guide_vagrant.rst | 151 +++++++++++++++++++--------------- 1 file changed, 86 insertions(+), 65 deletions(-) diff --git a/docsite/rst/guide_vagrant.rst b/docsite/rst/guide_vagrant.rst index 2aad2f1a03..e5870bdd85 100644 --- a/docsite/rst/guide_vagrant.rst +++ b/docsite/rst/guide_vagrant.rst @@ -6,12 +6,13 @@ Using Vagrant and Ansible Introduction ```````````` -Vagrant is a tool to manage virtual machine environments, and allows you to -configure and use reproducible work environments on top of various -virtualization and cloud platforms. It also has integration with Ansible as a -provisioner for these virtual machines, and the two tools work together well. +`Vagrant `_ is a tool to manage virtual machine +environments, and allows you to configure and use reproducible work +environments on top of various virtualization and cloud platforms. 
+It also has integration with Ansible as a provisioner for these virtual +machines, and the two tools work together well. -This guide will describe how to use Vagrant and Ansible together. +This guide will describe how to use Vagrant 1.7+ and Ansible together. If you're not familiar with Vagrant, you should visit `the documentation `_. @@ -27,54 +28,48 @@ Vagrant Setup The first step once you've installed Vagrant is to create a ``Vagrantfile`` and customize it to suit your needs. This is covered in detail in the Vagrant -documentation, but here is a quick example: - -.. code-block:: bash - - $ mkdir vagrant-test - $ cd vagrant-test - $ vagrant init precise32 http://files.vagrantup.com/precise32.box - -This will create a file called Vagrantfile that you can edit to suit your -needs. The default Vagrantfile has a lot of comments. Here is a simplified -example that includes a section to use the Ansible provisioner: +documentation, but here is a quick example that includes a section to use the +Ansible provisioner to manage a single machine: .. code-block:: ruby - # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! - VAGRANTFILE_API_VERSION = "2" - - Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "precise32" - config.vm.box_url = "http://files.vagrantup.com/precise32.box" - - config.vm.network :public_network + # This guide is optimized for Vagrant 1.7 and above. + # Although versions 1.6.x should behave very similarly, it is recommended + # to upgrade instead of disabling the requirement below. + Vagrant.require_version ">= 1.7.0" - config.vm.provision "ansible" do |ansible| - ansible.playbook = "playbook.yml" - end + Vagrant.configure(2) do |config| + + config.vm.box = "ubuntu/trusty64" + + # Disable the new default behavior introduced in Vagrant 1.7, to + # ensure that all Vagrant machines will use the same SSH key pair. 
+ # See https://github.com/mitchellh/vagrant/issues/5005 + config.ssh.insert_key = false + + config.vm.provision "ansible" do |ansible| + ansible.verbose = "v" + ansible.playbook = "playbook.yml" end + end -The Vagrantfile has a lot of options, but these are the most important ones. Notice the ``config.vm.provision`` section that refers to an Ansible playbook -called ``playbook.yml`` in the same directory as the Vagrantfile. Vagrant runs -the provisioner once the virtual machine has booted and is ready for SSH +called ``playbook.yml`` in the same directory as the ``Vagrantfile``. Vagrant +runs the provisioner once the virtual machine has booted and is ready for SSH access. +There are a lot of Ansible options you can configure in your ``Vagrantfile``. +Visit the `Ansible Provisioner documentation +`_ for more +information. + .. code-block:: bash $ vagrant up -This will start the VM and run the provisioning playbook. +This will start the VM, and run the provisioning playbook (on the first VM +startup). -There are a lot of Ansible options you can configure in your Vagrantfile. Some -particularly useful options are ``ansible.extra_vars``, ``ansible.sudo`` and -``ansible.sudo_user``, and ``ansible.host_key_checking`` which you can disable -to avoid SSH connection problems to new virtual machines. - -Visit the `Ansible Provisioner documentation -`_ for more -information. To re-run a playbook on an existing VM, just run: @@ -82,7 +77,19 @@ To re-run a playbook on an existing VM, just run: $ vagrant provision -This will re-run the playbook. +This will re-run the playbook against the existing VM. + +Note that having the ``ansible.verbose`` option enabled will instruct Vagrant +to show the full ``ansible-playbook`` command used behind the scene, as +illustrated by this example: + +.. 
code-block:: bash + + $ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook --private-key=/home/someone/.vagrant.d/insecure_private_key --user=vagrant --connection=ssh --limit='machine1' --inventory-file=/home/someone/coding-in-a-project/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml + +This information can be quite useful to debug integration issues and can also +be used to manually execute Ansible from a shell, as explained in the next +section. .. _running_ansible: @@ -90,44 +97,58 @@ Running Ansible Manually ```````````````````````` Sometimes you may want to run Ansible manually against the machines. This is -pretty easy to do. +faster than kicking ``vagrant provision`` and pretty easy to do. -Vagrant automatically creates an inventory file for each Vagrant machine in -the same directory located under ``.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory``. -It configures the inventory file according to the SSH tunnel that Vagrant -automatically creates, and executes ``ansible-playbook`` with the correct -username and SSH key options to allow access. A typical automatically-created -inventory file may look something like this: +With our ``Vagrantfile`` example, Vagrant automatically creates an Ansible +inventory file in ``.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory``. +This inventory is configured according to the SSH tunnel that Vagrant +automatically creates. A typical automatically-created inventory file for a +single machine environment may look something like this: .. code-block:: none # Generated by Vagrant - machine ansible_host=127.0.0.1 ansible_port=2222 - -.. 
include:: ansible_ssh_changes_note.rst + default ansible_ssh_host=127.0.0.1 ansible_ssh_port=2222 If you want to run Ansible manually, you will want to make sure to pass -``ansible`` or ``ansible-playbook`` commands the correct arguments for the -username (usually ``vagrant``) and the SSH key (since Vagrant 1.7.0, this will be something like -``.vagrant/machines/[machine name]/[provider]/private_key``), and the autogenerated inventory file. +``ansible`` or ``ansible-playbook`` commands the correct arguments, at least +for the *username*, the *SSH private key* and the *inventory*. -Here is an example: +Here is an example using the Vagrant global insecure key (``config.ssh.insert_key`` +must be set to ``false`` in your ``Vagrantfile``): .. code-block:: bash - - $ ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory --private-key=.vagrant/machines/default/virtualbox/private_key -u vagrant playbook.yml -Note: Vagrant versions prior to 1.7.0 will use the private key located at ``~/.vagrant.d/insecure_private_key.`` + $ ansible-playbook --private-key=~/.vagrant.d/insecure_private_key -u vagrant -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml + +Here is a second example using the random private key that Vagrant 1.7+ +automatically configures for each new VM (each key is stored in a path like +``.vagrant/machines/[machine name]/[provider]/private_key``): + +.. code-block:: bash + + $ ansible-playbook --private-key=.vagrant/machines/default/virtualbox/private_key -u vagrant -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml + +Advanced Usages +``````````````` + +The "Tips and Tricks" chapter of the `Ansible Provisioner documentation +`_ provides detailed information about more advanced Ansible features like: + + - how to parallely execute a playbook in a multi-machine environment + - how to integrate a local ``ansible.cfg`` configuration file .. 
seealso:: - `Vagrant Home `_ - The Vagrant homepage with downloads - `Vagrant Documentation `_ - Vagrant Documentation - `Ansible Provisioner `_ - The Vagrant documentation for the Ansible provisioner - :doc:`playbooks` - An introduction to playbooks + `Vagrant Home `_ + The Vagrant homepage with downloads + `Vagrant Documentation `_ + Vagrant Documentation + `Ansible Provisioner `_ + The Vagrant documentation for the Ansible provisioner + `Vagrant Issue Tracker `_ + The open issues for the Ansible provisioner in the Vagrant project + :doc:`playbooks` + An introduction to playbooks From 2631a8e6e49116433ca365c9fd46c20546a1c01b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Nov 2015 14:14:12 -0800 Subject: [PATCH 062/590] Update extras to fix docs build --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 19374903ac..035ad1d140 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 19374903ac679ee100a146f0615b92517020193b +Subproject commit 035ad1d140f6cbc6fd8fe310e8710f9572887ca0 From fb96748d7c0a4297dd5240c8c1aef533d7440fe3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 22 Nov 2015 09:52:34 -0800 Subject: [PATCH 063/590] fixes to fetch action module * now only runs remote checksum when needed (fixes #12290) * unified return points to simplify program flow --- lib/ansible/plugins/action/fetch.py | 33 ++++++++++++++--------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py index 478eac3f82..7c7f53de63 100644 --- a/lib/ansible/plugins/action/fetch.py +++ b/lib/ansible/plugins/action/fetch.py @@ -60,20 +60,22 @@ class ActionModule(ActionBase): source = self._connection._shell.join_path(source) source = self._remote_expand_user(source) - # calculate checksum for the remote file - remote_checksum = 
self._remote_checksum(source, all_vars=task_vars) + remote_checksum = None + if not self._play_context.become: + # calculate checksum for the remote file, don't bother if using become as slurp will be used + remote_checksum = self._remote_checksum(source, all_vars=task_vars) - # use slurp if sudo and permissions are lacking + # use slurp if permissions are lacking or privilege escalation is needed remote_data = None - if remote_checksum in ('1', '2') or self._play_context.become: + if remote_checksum in ('1', '2', None): slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp) if slurpres.get('failed'): if remote_checksum == '1' and not fail_on_missing: result['msg'] = "the remote file does not exist, not transferring, ignored" result['file'] = source result['changed'] = False - return result - result.update(slurpres) + else: + result.update(slurpres) return result else: if slurpres['encoding'] == 'base64': @@ -115,8 +117,8 @@ class ActionModule(ActionBase): dest = dest.replace("//","/") if remote_checksum in ('0', '1', '2', '3', '4'): - # these don't fail because you may want to transfer a log file that possibly MAY exist - # but keep going to fetch other log files + # these don't fail because you may want to transfer a log file that + # possibly MAY exist but keep going to fetch other log files if remote_checksum == '0': result['msg'] = "unable to calculate the checksum of the remote file" result['file'] = source @@ -162,8 +164,7 @@ class ActionModule(ActionBase): except (IOError, OSError) as e: raise AnsibleError("Failed to fetch the file: %s" % e) new_checksum = secure_hash(dest) - # For backwards compatibility. We'll return None on FIPS enabled - # systems + # For backwards compatibility. 
We'll return None on FIPS enabled systems try: new_md5 = md5(dest) except ValueError: @@ -171,16 +172,14 @@ class ActionModule(ActionBase): if validate_checksum and new_checksum != remote_checksum: result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)) - return result - result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)) - return result + else: + result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)) else: - # For backwards compatibility. We'll return None on FIPS enabled - # systems + # For backwards compatibility. We'll return None on FIPS enabled systems try: local_md5 = md5(dest) except ValueError: local_md5 = None - result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)) - return result + + return result From 478c6c756a8caae4c8e1dacbff9104056b1e1738 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 22 Nov 2015 14:44:08 -0800 Subject: [PATCH 064/590] marked spot that should send per item reulsts --- lib/ansible/executor/task_executor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 27e11bed79..0e1ebb600c 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -244,6 +244,7 @@ class TaskExecutor: # now update the result with the item info, and append the result # to the list of results res['item'] = item + #TODO: send item results to callback here, instead of all at the end results.append(res) return results From a9f5837157a2c468cac4fa4ec5c1c4c398600aef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Mon, 23 Nov 2015 07:10:47 +0100 Subject: [PATCH 065/590] docsite: cloudstack: fix 
missing quotes in example --- docsite/rst/guide_cloudstack.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/guide_cloudstack.rst b/docsite/rst/guide_cloudstack.rst index 93b4540b09..c798b26ea1 100644 --- a/docsite/rst/guide_cloudstack.rst +++ b/docsite/rst/guide_cloudstack.rst @@ -178,8 +178,8 @@ Now to the fun part. We create a playbook to create our infrastructure we call i - name: ensure firewall ports opened cs_firewall: - ip_address: {{ public_ip }} - port: {{ item.port }} + ip_address: "{{ public_ip }}" + port: "{{ item.port }}" cidr: "{{ item.cidr | default('0.0.0.0/0') }}" with_items: cs_firewall when: public_ip is defined From 0480b44f508751df9a4dd1903baf9adf3491b6f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Mon, 23 Nov 2015 16:58:24 +0100 Subject: [PATCH 066/590] Allow debug var parameter to accept a list or dict. Fix https://github.com/ansible/ansible/issues/13252 --- lib/ansible/plugins/action/debug.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index 5a5a805e74..d3d4612440 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -41,9 +41,13 @@ class ActionModule(ActionBase): # FIXME: move the LOOKUP_REGEX somewhere else elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): results = self._templar.template(self._task.args['var'], convert_bare=True) - if results == self._task.args['var']: - results = "VARIABLE IS NOT DEFINED!" - result[self._task.args['var']] = results + if type(self._task.args['var']) in (list, dict): + # If var is a list or dict, use the type as key to display + result[str(type(self._task.args['var']))] = results + else: + if results == self._task.args['var']: + results = "VARIABLE IS NOT DEFINED!" 
+ result[self._task.args['var']] = results else: result['msg'] = 'here we are' From adf2d53fa240fe0f628e0d8d2a2ff091d78cc830 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Nov 2015 08:41:19 -0800 Subject: [PATCH 067/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 1ff0704c0c..0e043f8c58 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 1ff0704c0c6dbc88e08f19620d7325aa5790d40f +Subproject commit 0e043f8c588a9cf3de63151fce5ae5afc03eb951 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 035ad1d140..e46e2e1d6f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 035ad1d140f6cbc6fd8fe310e8710f9572887ca0 +Subproject commit e46e2e1d6f32ffc7629e418ceb7bc4432fbaaf3e From 272778f732031eb98d417c4dd4f6947cdef08aea Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 23 Nov 2015 11:53:05 -0500 Subject: [PATCH 068/590] Modify task executor to reuse connection inside a loop. Fix WinRM connection to set _connected properly and display when remote shell is opened/closed. Add integration test using raw + with_items. 
--- lib/ansible/executor/task_executor.py | 6 ++++-- lib/ansible/plugins/connection/winrm.py | 16 +++++++++++----- .../roles/test_win_raw/tasks/main.yml | 13 +++++++++++++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 0e1ebb600c..e6e4cc3148 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -67,6 +67,7 @@ class TaskExecutor: self._new_stdin = new_stdin self._loader = loader self._shared_loader_obj = shared_loader_obj + self._connection = None def run(self): ''' @@ -361,8 +362,9 @@ class TaskExecutor: self._task.args = variable_params # get the connection and the handler for this execution - self._connection = self._get_connection(variables=variables, templar=templar) - self._connection.set_host_overrides(host=self._host) + if not self._connection or not getattr(self._connection, '_connected', False): + self._connection = self._get_connection(variables=variables, templar=templar) + self._connection.set_host_overrides(host=self._host) self._handler = self._get_action_handler(connection=self._connection, templar=templar) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index aa0c7b35e5..a506d39010 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -169,14 +169,16 @@ class Connection(ConnectionBase): rs = protocol.send_message(xmltodict.unparse(rq)) def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None): + if not self.protocol: + self.protocol = self._winrm_connect() + self._connected = True + if not self.shell_id: + self.shell_id = self.protocol.open_shell(codepage=65001) # UTF-8 + display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host) if from_exec: display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) else: display.vvvvvv("WINRM EXEC %r %r" % (command, args), 
host=self._winrm_host) - if not self.protocol: - self.protocol = self._winrm_connect() - if not self.shell_id: - self.shell_id = self.protocol.open_shell(codepage=65001) # UTF-8 command_id = None try: stdin_push_failed = False @@ -211,6 +213,7 @@ class Connection(ConnectionBase): def _connect(self): if not self.protocol: self.protocol = self._winrm_connect() + self._connected = True return self def exec_command(self, cmd, in_data=None, sudoable=True): @@ -387,5 +390,8 @@ class Connection(ConnectionBase): def close(self): if self.protocol and self.shell_id: + display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host) self.protocol.close_shell(self.shell_id) - self.shell_id = None + self.shell_id = None + self.protocol = None + self._connected = False diff --git a/test/integration/roles/test_win_raw/tasks/main.yml b/test/integration/roles/test_win_raw/tasks/main.yml index 6351c516be..30c1a75e6b 100644 --- a/test/integration/roles/test_win_raw/tasks/main.yml +++ b/test/integration/roles/test_win_raw/tasks/main.yml @@ -101,3 +101,16 @@ assert: that: - "raw_result2.stdout_lines[0] == '--% icacls D:\\\\somedir\\\\ /grant \"! ЗАО. 
Руководство\":F'" + +# Assumes MaxShellsPerUser == 30 (the default) + +- name: test raw + with_items to verify that winrm connection is reused for each item + raw: echo "{{item}}" + with_items: "{{range(32)|list}}" + register: raw_with_items_result + +- name: check raw + with_items result + assert: + that: + - "not raw_with_items_result|failed" + - "raw_with_items_result.results|length == 32" From e06b107d2dfd4682d807b7e871d3bcba501d4636 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 23 Nov 2015 11:54:06 -0500 Subject: [PATCH 069/590] Template (and include vars) PlaybookInclude paths Fixes #13249 --- lib/ansible/playbook/playbook_include.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index 0f505bd3a9..d9af2ba523 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -55,9 +55,9 @@ class PlaybookInclude(Base, Conditional, Taggable): # playbook objects new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader) - all_vars = dict() + all_vars = self.vars.copy() if variable_manager: - all_vars = variable_manager.get_vars(loader=loader) + all_vars.update(variable_manager.get_vars(loader=loader)) templar = Templar(loader=loader, variables=all_vars) if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars): @@ -66,7 +66,7 @@ class PlaybookInclude(Base, Conditional, Taggable): # then we use the object to load a Playbook pb = Playbook(loader=loader) - file_name = new_obj.include + file_name = templar.template(new_obj.include) if not os.path.isabs(file_name): file_name = os.path.join(basedir, file_name) From 5fa49a9ad87b75a789cdc8c9aa7b69b8bdf17c5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Mon, 23 Nov 2015 22:04:55 +0100 Subject: [PATCH 070/590] Use to_unicode instead of str() --- lib/ansible/plugins/action/debug.py | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index d3d4612440..a024e28b01 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -19,7 +19,7 @@ __metaclass__ = type from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean - +from ansible.utils.unicode import to_unicode class ActionModule(ActionBase): ''' Print statements during execution ''' @@ -43,7 +43,7 @@ class ActionModule(ActionBase): results = self._templar.template(self._task.args['var'], convert_bare=True) if type(self._task.args['var']) in (list, dict): # If var is a list or dict, use the type as key to display - result[str(type(self._task.args['var']))] = results + result[to_unicode(type(self._task.args['var']))] = results else: if results == self._task.args['var']: results = "VARIABLE IS NOT DEFINED!" From 3c39953dadc9ea8ebedde9d07779b33284df1f51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Tue, 24 Nov 2015 11:20:28 +0100 Subject: [PATCH 071/590] changelog: minor formating fix --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a18f6d43a..f99fc270cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -319,9 +319,11 @@ newline being stripped you can change your playbook like this: * Lookup, vars and action plugin pathing has been normalized, all now follow the same sequence to find relative files. * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. -* Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. 
Use the default filter if you want to approximate the old behaviour:: +* Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour: + ``` - debug: msg="The error message was: {{error_code |default('') }}" + ``` ## 1.9.4 "Dancing In the Street" - Oct 9, 2015 From 65747285a4d9db63ccd8ed9ad120690860e878b9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 24 Nov 2015 09:09:54 -0500 Subject: [PATCH 072/590] Properly check for prompting state when re-using ssh connection Fixes #13278 --- lib/ansible/executor/task_executor.py | 2 +- lib/ansible/plugins/connection/__init__.py | 5 +++++ lib/ansible/plugins/connection/ssh.py | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index e6e4cc3148..4a7d7464ef 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -362,7 +362,7 @@ class TaskExecutor: self._task.args = variable_params # get the connection and the handler for this execution - if not self._connection or not getattr(self._connection, '_connected', False): + if not self._connection or not getattr(self._connection, 'connected', False): self._connection = self._get_connection(variables=variables, templar=templar) self._connection.set_host_overrides(host=self._host) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 1ff5d8f30b..06616bac4c 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -75,6 +75,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): self.success_key = None self.prompt = None + self._connected = False # load the shell plugin for this action/connection if play_context.shell: @@ -88,6 +89,10 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): if 
not self._shell: raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type) + @property + def connected(self): + return self._connected + def _become_method_supported(self): ''' Checks if the current class supports this privilege escalation method ''' diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 8bbc031271..aa8eb77d56 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -372,7 +372,7 @@ class Connection(ConnectionBase): # wait for a password prompt. state = states.index('awaiting_prompt') display.debug('Initial state: %s: %s' % (states[state], self._play_context.prompt)) - elif self._play_context.become and self._play_context.success_key: + elif self._play_context.become and self._play_context.success_key and not self._connected: # We're requesting escalation without a password, so we have to # detect success/failure before sending any initial data. state = states.index('awaiting_escalation') From 70de8bc96f280329183226b56f3d633099ead857 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 24 Nov 2015 12:00:37 -0500 Subject: [PATCH 073/590] Fix ssh state issues by simply assuming it's never connected --- lib/ansible/plugins/connection/ssh.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index aa8eb77d56..d3a8877b60 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -60,7 +60,6 @@ class Connection(ConnectionBase): # management here. 
def _connect(self): - self._connected = True return self @staticmethod @@ -284,7 +283,7 @@ class Connection(ConnectionBase): for l in chunk.splitlines(True): suppress_output = False - # display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n'))) + #display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n'))) if self._play_context.prompt and self.check_password_prompt(l): display.debug("become_prompt: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n'))) self._flags['become_prompt'] = True @@ -372,7 +371,7 @@ class Connection(ConnectionBase): # wait for a password prompt. state = states.index('awaiting_prompt') display.debug('Initial state: %s: %s' % (states[state], self._play_context.prompt)) - elif self._play_context.become and self._play_context.success_key and not self._connected: + elif self._play_context.become and self._play_context.success_key: # We're requesting escalation without a password, so we have to # detect success/failure before sending any initial data. 
state = states.index('awaiting_escalation') From 0ec60ac09b86e434523fdfde929efa98c456fc8a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Nov 2015 10:49:03 -0800 Subject: [PATCH 074/590] added missing events to base class --- lib/ansible/plugins/callback/__init__.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 2b6875ae20..c845fe7003 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -304,3 +304,12 @@ class CallbackBase: def v2_playbook_on_include(self, included_file): pass #no v1 correspondance + + def v2_playbook_item_on_ok(self, result): + pass + + def v2_playbook_item_on_failed(self, result): + pass + + def v2_playbook_item_on_skipped(self, result) + pass From 7244b5ae49441de293d9fe382dae2f7b3e228944 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Nov 2015 10:52:42 -0800 Subject: [PATCH 075/590] added missing : --- lib/ansible/plugins/callback/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index c845fe7003..ccc8a7f8e5 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -311,5 +311,5 @@ class CallbackBase: def v2_playbook_item_on_failed(self, result): pass - def v2_playbook_item_on_skipped(self, result) + def v2_playbook_item_on_skipped(self, result): pass From b937018fcdee864ea85f209006e73c34c8003c87 Mon Sep 17 00:00:00 2001 From: dizzler Date: Tue, 24 Nov 2015 12:49:15 -0700 Subject: [PATCH 076/590] Fix typo in modules_core.rst --- docsite/rst/modules_core.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/modules_core.rst b/docsite/rst/modules_core.rst index 6364a1556f..4d692dad15 100644 --- a/docsite/rst/modules_core.rst +++ b/docsite/rst/modules_core.rst @@ -8,6 +8,6 @@ The source of these 
modules is hosted on GitHub in the `ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. -Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. +Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. Documentation updates for these modules can also be edited directly in the module itself and by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. From 90cd5fb30881ab85a74424a4127404ff5a5e5520 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Tue, 24 Nov 2015 15:26:58 -0500 Subject: [PATCH 077/590] use hubspot based ads instead of stored files --- docsite/_themes/srtd/layout.html | 6 +++--- .../_themes/srtd/static/images/banner_ad_1.png | Bin 4510 -> 0 bytes .../_themes/srtd/static/images/banner_ad_2.png | Bin 4951 -> 0 bytes 3 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 docsite/_themes/srtd/static/images/banner_ad_1.png delete mode 100644 docsite/_themes/srtd/static/images/banner_ad_2.png diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index f4d7a8a536..41b6b75c1d 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -169,7 +169,7 @@ - + - - + + + From 384b2e023476b7bee242a5c7c70ebad0b0dfb33f Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 2 Dec 2015 11:29:51 -0600 Subject: [PATCH 142/590] Get v2_playbook_on_start working * Move self._tqm.load_callbacks() earlier to ensure that v2_on_playbook_start can fire * Pass the playbook 
instance to v2_on_playbook_start * Add a _file_name instance attribute to the playbook --- lib/ansible/executor/playbook_executor.py | 6 ++++-- lib/ansible/playbook/__init__.py | 3 +++ lib/ansible/plugins/callback/__init__.py | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index d647c8246a..60a416af73 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -82,6 +82,10 @@ class PlaybookExecutor: if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} entry['plays'] = [] + else: + # make sure the tqm has callbacks loaded + self._tqm.load_callbacks() + self._tqm.send_callback('v2_playbook_on_start', pb) i = 1 plays = pb.get_plays() @@ -130,8 +134,6 @@ class PlaybookExecutor: entry['plays'].append(new_play) else: - # make sure the tqm has callbacks loaded - self._tqm.load_callbacks() self._tqm._unreachable_hosts.update(self._unreachable_hosts) # we are actually running plays diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 888299e1d9..0ae443f843 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -44,6 +44,7 @@ class Playbook: self._entries = [] self._basedir = os.getcwd() self._loader = loader + self._file_name = None @staticmethod def load(file_name, variable_manager=None, loader=None): @@ -61,6 +62,8 @@ class Playbook: # set the loaders basedir self._loader.set_basedir(self._basedir) + self._file_name = file_name + # dynamically load any plugins from the playbook directory for name, obj in get_all_plugin_loaders(): if obj.subdir: diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index ccc8a7f8e5..03eb58d99d 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -246,7 +246,7 @@ class CallbackBase: def 
v2_runner_on_file_diff(self, result, diff): pass #no v1 correspondance - def v2_playbook_on_start(self): + def v2_playbook_on_start(self, playbook): self.playbook_on_start() def v2_playbook_on_notify(self, result, handler): From eb7db067f9cb41837602de995543367d322bbaff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 10:32:10 -0800 Subject: [PATCH 143/590] Fix template test results on python2.6 --- test/integration/roles/test_template/files/foo-py26.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/roles/test_template/files/foo-py26.txt b/test/integration/roles/test_template/files/foo-py26.txt index 84279bc7b3..76b0bb56f7 100644 --- a/test/integration/roles/test_template/files/foo-py26.txt +++ b/test/integration/roles/test_template/files/foo-py26.txt @@ -3,6 +3,7 @@ templated_var_loaded { "bool": true, "multi_part": "1Foo", + "null_type": null, "number": 5, "string_num": "5" } From 8ff67e049451e48e5f79032da88435f570bb5311 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Dec 2015 12:40:46 -0500 Subject: [PATCH 144/590] Default msg param to AnsibleError to avoid serialization problems --- lib/ansible/errors/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py index a2411b7bef..017272af7c 100644 --- a/lib/ansible/errors/__init__.py +++ b/lib/ansible/errors/__init__.py @@ -44,7 +44,7 @@ class AnsibleError(Exception): which should be returned by the DataLoader() class. 
''' - def __init__(self, message, obj=None, show_content=True): + def __init__(self, message="", obj=None, show_content=True): # we import this here to prevent an import loop problem, # since the objects code also imports ansible.errors from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject From a183972477de03c8f924525135908d4db258d44f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Dec 2015 14:16:08 -0500 Subject: [PATCH 145/590] Don't use play vars in HostVars Fixes #13398 --- lib/ansible/executor/task_queue_manager.py | 1 - lib/ansible/vars/__init__.py | 15 ++------------- lib/ansible/vars/hostvars.py | 5 ++--- 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 0f8f16ee6c..d665000046 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -188,7 +188,6 @@ class TaskQueueManager: pass hostvars = HostVars( - play=new_play, inventory=self._inventory, variable_manager=self._variable_manager, loader=self._loader, diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 2c9d8aca33..d636e8d4b9 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -43,7 +43,6 @@ from ansible.template import Templar from ansible.utils.debug import debug from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.vars import combine_vars -from ansible.vars.hostvars import HostVars from ansible.vars.unsafe_proxy import wrap_var try: @@ -171,7 +170,8 @@ class VariableManager: return data - + # FIXME: include_hostvars is no longer used, and should be removed, but + # all other areas of code calling get_vars need to be fixed too def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters @@ -367,17 +367,6 @@ 
class VariableManager: variables['groups'] = dict() for (group_name, group) in iteritems(self._inventory.groups): variables['groups'][group_name] = [h.name for h in group.get_hosts()] - - #if include_hostvars: - # hostvars_cache_entry = self._get_cache_entry(play=play) - # if hostvars_cache_entry in HOSTVARS_CACHE: - # hostvars = HOSTVARS_CACHE[hostvars_cache_entry] - # else: - # hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self) - # HOSTVARS_CACHE[hostvars_cache_entry] = hostvars - # variables['hostvars'] = hostvars - # variables['vars'] = hostvars[host.get_name()] - if play: variables['role_names'] = [r._role_name for r in play.roles] diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index a82e683d74..afa00ec8a4 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -46,11 +46,10 @@ __all__ = ['HostVars'] class HostVars(collections.Mapping): ''' A special view of vars_cache that adds values from the inventory when needed. 
''' - def __init__(self, play, inventory, variable_manager, loader): + def __init__(self, inventory, variable_manager, loader): self._lookup = dict() self._inventory = inventory self._loader = loader - self._play = play self._variable_manager = variable_manager self._cached_result = dict() @@ -68,7 +67,7 @@ class HostVars(collections.Mapping): if host is None: raise j2undefined - data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False) + data = self._variable_manager.get_vars(loader=self._loader, host=host, include_hostvars=False) sha1_hash = sha1(str(data).encode('utf-8')).hexdigest() if sha1_hash in self._cached_result: From 6559616a04a1171289933f90a189e92492a1c406 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 1 Dec 2015 22:41:23 -0800 Subject: [PATCH 146/590] updated docs for 2.0 api --- docsite/rst/developing_api.rst | 72 ++++++++++++++++++++++++++++++---- 1 file changed, 64 insertions(+), 8 deletions(-) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index 76cebb64f1..319417672e 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -17,11 +17,67 @@ This chapter discusses the Python API. .. _python_api: -Python API ----------- - The Python API is very powerful, and is how the ansible CLI and ansible-playbook -are implemented. +are implemented. In version 2.0 the core ansible got rewritten and the API was mostly rewritten. + +.. 
_python_api_20: + +Python API 2.0 +-------------- + +In 2.0 things get a bit more complicated to start, but you end up with much more discrete and readable classes:: + + + #!/usr/bin/python2 + + from collections import namedtuple + from ansible.parsing.dataloader import DataLoader + from ansible.vars import VariableManager + from ansible.inventory import Inventory + from ansible.playbook.play import Play + from ansible.executor.task_queue_manager import TaskQueueManager + + Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check']) + # initialize needed objects + variable_manager = VariableManager() + loader = DataLoader() + options = Options(connection='local', module_path='/path/to/mymodules', forks=100, remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None, become_user=None, verbosity=None, check=False) + passwords = dict(vault_pass='secret') + + # create inventory and pass to var manager + inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost') + variable_manager.set_inventory(inventory) + + # create play with tasks + play_source = dict( + name = "Ansible Play", + hosts = 'localhost', + gather_facts = 'no', + tasks = [ dict(action=dict(module='debug', args=(msg='Hello Galaxy!'))) ] + ) + play = Play().load(play_source, variable_manager=variable_manager, loader=loader) + + # actually run it + tqm = None + try: + tqm = TaskQueueManager( + inventory=inventory, + variable_manager=variable_manager, + loader=loader, + options=options, + passwords=passwords, + stdout_callback='default', + ) + result = tqm.run(play) + finally: + if tqm is not None: + tqm.cleanup() + + +.. 
_python_api_old: + +Python API pre 2.0 +------------------ It's pretty simple:: @@ -51,7 +107,7 @@ expressed in the :doc:`modules` documentation.:: A module can return any type of JSON data it wants, so Ansible can be used as a framework to rapidly build powerful applications and scripts. -.. _detailed_api_example: +.. _detailed_api_old_example: Detailed API Example ```````````````````` @@ -87,9 +143,9 @@ The following script prints out the uptime information for all hosts:: for (hostname, result) in results['dark'].items(): print "%s >>> %s" % (hostname, result) -Advanced programmers may also wish to read the source to ansible itself, for -it uses the Runner() API (with all available options) to implement the -command line tools ``ansible`` and ``ansible-playbook``. +Advanced programmers may also wish to read the source to ansible itself, +for it uses the API (with all available options) to implement the ``ansible`` +command line tools (``lib/ansible/cli/``). .. seealso:: From ac54ac618cf7a44f504a222142b749f18f4e2cef Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 14:48:27 -0800 Subject: [PATCH 147/590] Something's strange... let's see if python2.6 is really the same now... --- test/integration/roles/test_template/tasks/main.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index a35b93d9d9..28477d44e5 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -48,11 +48,13 @@ - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt - when: pyver.stdout != '2.6' -- name: copy known good into place - copy: src=foo-py26.txt dest={{output_dir}}/foo.txt - when: pyver.stdout == '2.6' +# Seems that python-2.6 now outputs the same format as everywhere else? 
+# when: pyver.stdout != '2.6' + +#- name: copy known good into place +# copy: src=foo-py26.txt dest={{output_dir}}/foo.txt +# when: pyver.stdout == '2.6' - name: compare templated file to known good shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt From 2a33a13a20b622e14d9b9a81da461890b816ad16 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Dec 2015 16:44:43 -0800 Subject: [PATCH 148/590] updated port version --- packaging/port/sysutils/ansible/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/port/sysutils/ansible/Makefile b/packaging/port/sysutils/ansible/Makefile index 10016f9908..ef71c95c6c 100644 --- a/packaging/port/sysutils/ansible/Makefile +++ b/packaging/port/sysutils/ansible/Makefile @@ -1,7 +1,7 @@ # $FreeBSD$ PORTNAME= ansible -PORTVERSION= 2.0 +PORTVERSION= 2.1 PORTREVISION= 1 CATEGORIES= python net-mgmt sysutils MASTER_SITES= http://releases.ansible.com/ansible/ From b85e6e008ddf0e5a3308afa8111bb058611a6f0b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Dec 2015 16:46:12 -0800 Subject: [PATCH 149/590] updated version that makefile uses --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d05cb3d448..879b416e60 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0 0.5.beta3 +2.1 From 9b81c35d06a598aef05e546a1476a4aa18d115f3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 20:52:58 -0800 Subject: [PATCH 150/590] Don't compare or merge str with unicode Fixes #13387 --- lib/ansible/module_utils/basic.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index d2cf09458e..527a4c0a6c 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -369,7 +369,12 @@ def return_values(obj): sensitive values pre-jsonification.""" if isinstance(obj, basestring): if obj: - yield obj + if 
isinstance(obj, bytes): + yield obj + else: + # Unicode objects should all convert to utf-8 + # (still must deal with surrogateescape on python3) + yield obj.encode('utf-8') return elif isinstance(obj, Sequence): for element in obj: @@ -391,10 +396,22 @@ def remove_values(value, no_log_strings): """ Remove strings in no_log_strings from value. If value is a container type, then remove a lot more""" if isinstance(value, basestring): - if value in no_log_strings: + if isinstance(value, unicode): + # This should work everywhere on python2. Need to check + # surrogateescape on python3 + bytes_value = value.encode('utf-8') + value_is_unicode = True + else: + bytes_value = value + value_is_unicode = False + if bytes_value in no_log_strings: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' for omit_me in no_log_strings: - value = value.replace(omit_me, '*' * 8) + bytes_value = bytes_value.replace(omit_me, '*' * 8) + if value_is_unicode: + value = unicode(bytes_value, 'utf-8', errors='replace') + else: + value = bytes_value elif isinstance(value, Sequence): return [remove_values(elem, no_log_strings) for elem in value] elif isinstance(value, Mapping): From 5fdfe6a0f262e24be6ad8ec906f983220169f5b8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 21:07:41 -0800 Subject: [PATCH 151/590] Add some test data that has unicode values --- test/units/module_utils/basic/test_no_log.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py index 24d38ddcfa..7b8c976c96 100644 --- a/test/units/module_utils/basic/test_no_log.py +++ b/test/units/module_utils/basic/test_no_log.py @@ -69,6 +69,8 @@ class TestRemoveValues(unittest.TestCase): 'three': ['amigos', 'musketeers', None, {'ping': 'pong', 'base': ['balls', 'raquets']}]}, frozenset(['nope'])), + ('Toshio くら', frozenset(['とみ'])), + (u'Toshio くら', frozenset(['とみ'])), ) dataset_remove = ( ('string', frozenset(['string']), OMIT), @@ 
-94,6 +96,8 @@ class TestRemoveValues(unittest.TestCase): ('This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery', frozenset(['enigma', 'mystery', 'secret']), 'This sentence has an ******** wrapped in a ******** inside of a ********. - mr ********'), + ('Toshio くらとみ', frozenset(['くらとみ']), 'Toshio ********'), + (u'Toshio くらとみ', frozenset(['くらとみ']), u'Toshio ********'), ) def test_no_removal(self): From e00012994e9d26156380863f83266ac26a536e5d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 21:09:53 -0800 Subject: [PATCH 152/590] Also some unicode tests for return_values() --- test/units/module_utils/basic/test_no_log.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py index 7b8c976c96..3cb5d7b64b 100644 --- a/test/units/module_utils/basic/test_no_log.py +++ b/test/units/module_utils/basic/test_no_log.py @@ -46,6 +46,8 @@ class TestReturnValues(unittest.TestCase): 'three': ['amigos', 'musketeers', None, {'ping': 'pong', 'base': ('balls', 'raquets')}]}, frozenset(['1', 'dos', 'amigos', 'musketeers', 'pong', 'balls', 'raquets'])), + (u'Toshio くらとみ', frozenset(['Toshio くらとみ'])), + ('Toshio くらとみ', frozenset(['Toshio くらとみ'])), ) def test_return_values(self): From 9caa2b0452fa6e70bb7ae3f6b5b979d812b36642 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 07:59:23 -0800 Subject: [PATCH 153/590] Revert "Update docs and example config for requiretty + pipelining change" This reverts commit f873cc0fb54f309aa9ece4e4127bdf1071d1bfd7. 
Reverting pipelining change for now due to hard to pin down bugs: #13410 #13411 --- docsite/rst/intro_configuration.rst | 21 +++++++-------------- examples/ansible.cfg | 12 ++++++------ 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index ca3fd00654..dda07fc450 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -799,22 +799,15 @@ pipelining ========== Enabling pipelining reduces the number of SSH operations required to -execute a module on the remote server, by executing many ansible modules without actual file transfer. -This can result in a very significant performance improvement when enabled. -As of Ansible 2.1.0 this option is enabled by default. +execute a module on the remote server, by executing many ansible modules without actual file transfer. +This can result in a very significant performance improvement when enabled, however when using "sudo:" operations you must +first disable 'requiretty' in /etc/sudoers on all managed hosts. -In previous versions, this option was disabled because of a bad interaction -with some sudo configurations. If sudo was configured to 'requiretty' for -operation then pipelining would not work and ansible would fail to connect -properly. This could be remedied by removing 'requiretty' in /etc/sudoers on -all managed hosts. +By default, this option is disabled to preserve compatibility with +sudoers configurations that have requiretty (the default on many distros), but is highly +recommended if you can enable it, eliminating the need for :doc:`playbooks_acceleration`:: -It is recommended to leave this option enabled. If you are stuck with an old -version of ansible your first choice option should be to remove requiretty -from the sudoers configuration and only disable pipelining if you cannot do -that. 
Enabling this eliminates the need for :doc:`playbooks_acceleration`:: - - pipelining=True + pipelining=False .. _accelerate_settings: diff --git a/examples/ansible.cfg b/examples/ansible.cfg index d77dfba0c0..74aef7a024 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -226,13 +226,13 @@ # Enabling pipelining reduces the number of SSH operations required to # execute a module on the remote server. This can result in a significant -# performance improvement when enabled. It is enabled by default. +# performance improvement when enabled, however when using "sudo:" you must +# first disable 'requiretty' in /etc/sudoers # -# In previous versions of ansible this was defaulted to off as it was -# incompatible with sudo's requiretty option. Ansible 2.1 and above contain -# a fix for that problem. -# -#pipelining = True +# By default, this option is disabled to preserve compatibility with +# sudoers configurations that have requiretty (the default on many distros). +# +#pipelining = False # if True, make ansible use scp if the connection type is ssh # (default is sftp) From fbb63d66e79a772642aea4db050401cc89332fac Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 08:00:28 -0800 Subject: [PATCH 154/590] Revert "Note crab and mgedmin's work to make pipelining compatible with sudo+requiretty" This reverts commit 1d8e178732dd7303ac2651377f75aaed96b5d037. Reverting for now due to hard to pin down bugs: #13410 #13411 --- CHANGELOG.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index efcbb2bdd3..f6c10c589b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,11 +3,6 @@ Ansible Changes By Release ## 2.1 TBD - ACTIVE DEVELOPMENT -###Major Changes: - -* A fix was applied to make ansible's pipelining mode work with sudo when sudo - is configured to use requiretty. Thanks to amenonsen and mgedmin! 
- ####New Modules: * cloudstack: cs_volume From e201a255d17a72b338be92b8db881effb79b5ece Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 08:01:05 -0800 Subject: [PATCH 155/590] Revert "Make sudo+requiretty and ANSIBLE_PIPELINING work together" This reverts commit f488de85997079f480d504f73537e3e33ff2495b. Reverting for now due to hard to pin down bugs: #13410 #13411 --- lib/ansible/constants.py | 2 +- lib/ansible/plugins/action/__init__.py | 6 ++-- lib/ansible/plugins/connection/ssh.py | 43 ++++++++++++++++++------- lib/ansible/plugins/shell/powershell.py | 2 +- lib/ansible/plugins/shell/sh.py | 11 ++----- 5 files changed, 39 insertions(+), 25 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 5837ecae80..08d522fcb6 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -237,7 +237,7 @@ DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-o ControlMaster=auto -o ControlPersist=60s') ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") -ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', True, boolean=True) +ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 73eb5e4346..64a3b51e5d 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -177,7 +177,7 @@ class 
ActionBase(with_metaclass(ABCMeta, object)): if tmp and "tmp" in tmp: # tmp has already been created return False - if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES: + if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su': # tmp is necessary to store the module source code # or we want to keep the files on the target system return True @@ -439,9 +439,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp - python_interp = task_vars.get('ansible_python_interpreter', 'python') - - cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp, python_interpreter=python_interp) + cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp) cmd = cmd.strip() sudoable = True diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 607dcd667f..debe36bd32 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -241,7 +241,7 @@ class Connection(ConnectionBase): return self._command - def _send_initial_data(self, fh, in_data, tty=False): + def _send_initial_data(self, fh, in_data): ''' Writes initial data to the stdin filehandle of the subprocess and closes it. (The handle must be closed; otherwise, for example, "sftp -b -" will @@ -252,8 +252,6 @@ class Connection(ConnectionBase): try: fh.write(in_data) - if tty: - fh.write("__EOF__942d747a0772c3284ffb5920e234bd57__\n") fh.close() except (OSError, IOError): raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. 
Make sure this host can be reached over ssh') @@ -316,7 +314,7 @@ class Connection(ConnectionBase): return ''.join(output), remainder - def _run(self, cmd, in_data, sudoable=True, tty=False): + def _run(self, cmd, in_data, sudoable=True): ''' Starts the command and communicates with it until it ends. ''' @@ -324,10 +322,25 @@ class Connection(ConnectionBase): display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]] display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) - # Start the given command. + # Start the given command. If we don't need to pipeline data, we can try + # to use a pseudo-tty (ssh will have been invoked with -tt). If we are + # pipelining data, or can't create a pty, we fall back to using plain + # old pipes. - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdin = p.stdin + p = None + if not in_data: + try: + # Make sure stdin is a proper pty to avoid tcgetattr errors + master, slave = pty.openpty() + p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = os.fdopen(master, 'w', 0) + os.close(slave) + except (OSError, IOError): + p = None + + if not p: + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = p.stdin # If we are using SSH password authentication, write the password into # the pipe we opened in _build_command. @@ -390,7 +403,7 @@ class Connection(ConnectionBase): # before we call select. if states[state] == 'ready_to_send' and in_data: - self._send_initial_data(stdin, in_data, tty) + self._send_initial_data(stdin, in_data) state += 1 while True: @@ -488,7 +501,7 @@ class Connection(ConnectionBase): if states[state] == 'ready_to_send': if in_data: - self._send_initial_data(stdin, in_data, tty) + self._send_initial_data(stdin, in_data) state += 1 # Now we're awaiting_exit: has the child process exited? 
If it has, @@ -544,9 +557,17 @@ class Connection(ConnectionBase): display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr) - cmd = self._build_command('ssh', '-tt', self.host, cmd) + # we can only use tty when we are not pipelining the modules. piping + # data into /usr/bin/python inside a tty automatically invokes the + # python interactive-mode but the modules are not compatible with the + # interactive-mode ("unexpected indent" mainly because of empty lines) - (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable, tty=True) + if in_data: + cmd = self._build_command('ssh', self.host, cmd) + else: + cmd = self._build_command('ssh', '-tt', self.host, cmd) + + (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable) return (returncode, stdout, stderr) diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py index 9fd1541b63..096a0cf95d 100644 --- a/lib/ansible/plugins/shell/powershell.py +++ b/lib/ansible/plugins/shell/powershell.py @@ -110,7 +110,7 @@ class ShellModule(object): ''' % dict(path=path) return self._encode_script(script) - def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None, python_interpreter=None): + def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): cmd_parts = shlex.split(to_bytes(cmd), posix=False) cmd_parts = map(to_unicode, cmd_parts) if shebang and shebang.lower() == '#!powershell': diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 719ac83ffb..f1fa3565b7 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -138,17 +138,12 @@ class ShellModule(object): cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path) return cmd - def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None, python_interpreter='python'): + def 
build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): # don't quote the cmd if it's an empty string, because this will # break pipelining mode - env = env_string.strip() - exe = shebang.replace("#!", "").strip() - if cmd.strip() == '': - reader = "%s -uc 'import sys; [sys.stdout.write(s) for s in iter(sys.stdin.readline, \"__EOF__942d747a0772c3284ffb5920e234bd57__\\n\")]'|" % python_interpreter - cmd_parts = [env, reader, env, exe] - else: + if cmd.strip() != '': cmd = pipes.quote(cmd) - cmd_parts = [env, exe, cmd] + cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd] if arg_path is not None: cmd_parts.append(arg_path) new_cmd = " ".join(cmd_parts) From 5f83a6aeda131f519a47d929eabe1666a7dff21b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Dec 2015 11:29:09 -0500 Subject: [PATCH 156/590] Properly default remote_user for delegated-to hosts Fixes #13323 --- lib/ansible/playbook/play_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index b2b7e44497..5c02093980 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -350,7 +350,7 @@ class PlayContext(Base): if user_var in delegated_vars: break else: - delegated_vars['ansible_user'] = None + delegated_vars['ansible_user'] = task.remote_user or self.remote_user else: delegated_vars = dict() From 29f5c5db7178b3bb26f4dd8410269a44d17e5315 Mon Sep 17 00:00:00 2001 From: Peter Sprygada Date: Thu, 3 Dec 2015 12:50:23 -0500 Subject: [PATCH 157/590] bugfix for ios.py shared module argument creation This patch fixes a bug in module_utils/ios.py where the the wrong shared module arguments are being generated. This bug prevented the shared module from operating correctly. This patch should be generally applied. 
--- lib/ansible/module_utils/ios.py | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index dc46a860c6..085b68dcd2 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -80,7 +80,7 @@ def ios_module(**kwargs): """ spec = kwargs.get('argument_spec') or dict() - argument_spec = url_argument_spec() + argument_spec = shell_argument_spec() argument_spec.update(IOS_COMMON_ARGS) if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) @@ -150,21 +150,6 @@ class IosShell(object): responses.append(response) return responses -def ios_from_args(module): - """Extracts the set of argumetns to build a valid IOS connection - """ - params = dict() - for arg, attrs in IOS_COMMON_ARGS.iteritems(): - if module.params['device']: - params[arg] = module.params['device'].get(arg) - if arg not in params or module.params[arg]: - params[arg] = module.params[arg] - if params[arg] is None: - if attrs.get('required'): - module.fail_json(msg='argument %s is required' % arg) - params[arg] = attrs.get('default') - return params - def ios_connection(module): """Creates a connection to an IOS device based on the module arguments """ @@ -180,16 +165,16 @@ def ios_connection(module): shell = IosShell() shell.connect(host, port=port, username=username, password=password, timeout=timeout) + shell.send('terminal length 0') except paramiko.ssh_exception.AuthenticationException, exc: module.fail_json(msg=exc.message) except socket.error, exc: module.fail_json(msg=exc.strerror, errno=exc.errno) - shell.send('terminal length 0') - if module.params['enable_mode']: shell.authorize(module.params['enable_password']) return shell + From 4a4e7a6ebb9026bcb8118ca342380302014fbacf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 11:20:00 -0800 Subject: [PATCH 158/590] added extract filter to changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 
insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6c10c589b..f9f8b4b76a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,9 @@ Ansible Changes By Release ####New Modules: * cloudstack: cs_volume +####New Filters: +* extract + ## 2.0 "Over the Hills and Far Away" ###Major Changes: From 4426b7f6e03553cabd9d698c4912f48c523ca2d9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Dec 2015 14:22:27 -0500 Subject: [PATCH 159/590] fix sorting of groups for host vars Fixes #13371 --- lib/ansible/vars/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index d636e8d4b9..1184ec5049 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -234,7 +234,7 @@ class VariableManager: for item in data: all_vars = combine_vars(all_vars, item) - for group in host.get_groups(): + for group in sorted(host.get_groups(), key=lambda g: g.depth): if group.name in self._group_vars_files and group.name != 'all': for data in self._group_vars_files[group.name]: data = preprocess_vars(data) @@ -404,7 +404,7 @@ class VariableManager: items = [] if task.loop is not None: if task.loop in lookup_loader: - #TODO: remove convert_bare true and deprecate this in with_ + #TODO: remove convert_bare true and deprecate this in with_ try: loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar, loader=loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: @@ -604,4 +604,3 @@ class VariableManager: if host_name not in self._vars_cache: self._vars_cache[host_name] = dict() self._vars_cache[host_name][varname] = value - From f467f1770f8885b657fa01270b6f2909249d1f93 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Dec 2015 15:25:54 -0500 Subject: [PATCH 160/590] Properly compare object references for Hosts when adding new ones Fixes #13397 --- lib/ansible/inventory/dir.py | 7 ++++++- 1 file changed, 6 
insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py index 7ae9611ddf..e4f7ee80f9 100644 --- a/lib/ansible/inventory/dir.py +++ b/lib/ansible/inventory/dir.py @@ -192,6 +192,8 @@ class InventoryDirectory(object): if group.name not in self.groups: # it's brand new, add him! self.groups[group.name] = group + # the Group class does not (yet) implement __eq__/__ne__, + # so unlike Host we do a regular comparison here if self.groups[group.name] != group: # different object, merge self._merge_groups(self.groups[group.name], group) @@ -200,7 +202,10 @@ class InventoryDirectory(object): if host.name not in self.hosts: # Papa's got a brand new host self.hosts[host.name] = host - if self.hosts[host.name] != host: + # because the __eq__/__ne__ methods in Host() compare the + # name fields rather than references, we use id() here to + # do the object comparison for merges + if id(self.hosts[host.name]) != id(host): # different object, merge self._merge_hosts(self.hosts[host.name], host) From cfeef81303b6c9e197b48783c49376f989f67e18 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 14:15:37 -0800 Subject: [PATCH 161/590] For now, skip tests of module_utils/basic functions that are failing on py3 (these are only run on the target hosts, not on the controller). 
--- test/units/module_utils/basic/test_heuristic_log_sanitize.py | 1 + test/units/module_utils/basic/test_no_log.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/test/units/module_utils/basic/test_heuristic_log_sanitize.py b/test/units/module_utils/basic/test_heuristic_log_sanitize.py index 51a5c11adf..14ffff0d74 100644 --- a/test/units/module_utils/basic/test_heuristic_log_sanitize.py +++ b/test/units/module_utils/basic/test_heuristic_log_sanitize.py @@ -85,6 +85,7 @@ class TestHeuristicLogSanitize(unittest.TestCase): self.assertTrue(ssh_output.endswith("}")) self.assertIn(":********@foo.com/data'", ssh_output) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_hides_parameter_secrets(self): output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret'])) self.assertNotIn('secret', output) diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py index 3cb5d7b64b..102b7a3ab2 100644 --- a/test/units/module_utils/basic/test_no_log.py +++ b/test/units/module_utils/basic/test_no_log.py @@ -50,6 +50,7 @@ class TestReturnValues(unittest.TestCase): ('Toshio くらとみ', frozenset(['Toshio くらとみ'])), ) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_return_values(self): for data, expected in self.dataset: self.assertEquals(frozenset(return_values(data)), expected) @@ -102,10 +103,12 @@ class TestRemoveValues(unittest.TestCase): (u'Toshio くらとみ', frozenset(['くらとみ']), u'Toshio ********'), ) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_no_removal(self): for value, no_log_strings in self.dataset_no_remove: self.assertEquals(remove_values(value, no_log_strings), value) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_strings_to_remove(self): for value, no_log_strings, expected 
in self.dataset_remove: self.assertEquals(remove_values(value, no_log_strings), expected) From 26520442bd0fe231abc0a4432c6f2943b61f8fb8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 28 Nov 2015 20:37:17 -0800 Subject: [PATCH 162/590] Now and/or shell expressions depend on shell plugin This should fix issues with fish shell users as && and || are not valid syntax, fish uses actual 'and' and 'or' programs. Also updated to allow for fish backticks pushed quotes to subshell, fish seems to handle spaces w/o them. Lastly, removed encompassing subshell () for fish compatibility. fixes #13199 --- lib/ansible/plugins/shell/fish.py | 7 +++++++ lib/ansible/plugins/shell/sh.py | 18 ++++++++++++------ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py index ff78941e19..342de99e5f 100644 --- a/lib/ansible/plugins/shell/fish.py +++ b/lib/ansible/plugins/shell/fish.py @@ -21,5 +21,12 @@ from ansible.plugins.shell.sh import ShellModule as ShModule class ShellModule(ShModule): + _SHELL_AND = '; and' + _SHELL_OR = '; or' + _SHELL_SUB_LEFT = '(' + _SHELL_SUB_RIGHT = ')' + _SHELL_GROUP_LEFT = '' + _SHELL_GROUP_RIGHT = '' + def env_prefix(self, **kwargs): return 'env %s' % super(ShellModule, self).env_prefix(**kwargs) diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index f1fa3565b7..7fbfa819ef 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -33,6 +33,12 @@ class ShellModule(object): # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\n' _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1' + _SHELL_AND = '&&' + _SHELL_OR = '||' + _SHELL_SUB_LEFT = '"$(' + _SHELL_SUB_RIGHT = ')"' + _SHELL_GROUP_LEFT = '(' + _SHELL_GROUP_RIGHT = ')' def env_prefix(self, **kwargs): '''Build command prefix with environment variables.''' @@ -71,14 +77,14 @@ class ShellModule(object): basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile) 
if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')): basetmp = self.join_path('/tmp', basefile) - cmd = 'mkdir -p "`echo %s`"' % basetmp - cmd += ' && echo "`echo %s`"' % basetmp + cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) + cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) # change the umask in a subshell to achieve the desired mode # also for directories created with `mkdir -p` if mode: tmp_umask = 0o777 & ~mode - cmd = '(umask %o && %s)' % (tmp_umask, cmd) + cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT) return cmd @@ -128,14 +134,14 @@ class ShellModule(object): # used by a variety of shells on the remote host to invoke a python # "one-liner". shell_escaped_path = pipes.quote(path) - test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp) + test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) csums = [ "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3) "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 
0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4 ] - cmd = " || ".join(csums) - cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path) + cmd = (" %s " % self._SHELL_OR).join(csums) + cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path) return cmd def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): From a1f516824ee2160121437edf6939ab2145972739 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 18:23:08 -0800 Subject: [PATCH 163/590] corrected playbook path, reformated options help the last just to make the help consistent and readable --- lib/ansible/cli/pull.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 04586c1d0c..9cc6c25e9f 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -74,8 +74,10 @@ class PullCLI(CLI): help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') - self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', help='directory to checkout repository to') - self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') + self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', + help='directory to checkout repository to') + self.parser.add_option('-U', '--url', dest='url', default=None, + help='URL of the playbook repository') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. 
' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -174,8 +176,7 @@ class PullCLI(CLI): display.display("Repository has not changed, quitting.") return 0 - playbook = self.select_playbook(path) - + playbook = self.select_playbook(self.options.dest) if playbook is None: raise AnsibleOptionsError("Could not find a playbook to run.") From 8d5f36a6c23ad17116ee0bb24c07f83745efb8e0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 19:39:57 -0800 Subject: [PATCH 164/590] return unique list of hosts --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index fdcbd37e78..59a3c37bf9 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -196,7 +196,7 @@ class Inventory(object): hosts = [ h for h in hosts if h in self._restriction ] HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:] - return hosts + return list(set(hosts)) @classmethod def split_host_pattern(cls, pattern): From e1c62fb5afd5344dc1f3ff1606803263218b79ea Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 19:42:05 -0800 Subject: [PATCH 165/590] reverted to previous pull checkout dir behaviour This fixes bugs with not finding plays when not specifying checkout dir Also makes it backwards compatible --- lib/ansible/cli/pull.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 9cc6c25e9f..b2e402126d 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -74,7 +74,7 @@ class PullCLI(CLI): help='sleep for random interval (between 0 and n number of seconds) before starting. 
This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') - self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', + self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') @@ -90,6 +90,11 @@ class PullCLI(CLI): self.options, self.args = self.parser.parse_args() + if not self.options.dest: + hostname = socket.getfqdn() + # use a hostname dependent directory, in case of $HOME on nfs + self.options.dest = os.path.join('~/.ansible/pull', hostname) + if self.options.sleep: try: secs = random.randint(0,int(self.options.sleep)) From d5446f98046d379ec950b849317472982dcba757 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 20:47:02 -0800 Subject: [PATCH 166/590] fixed ansible-pull broken options * sudo was not working, now it supports full become * now default checkout dir works, not only when specifying * paths for checkout dir get expanded * fixed limit options for playbook * added verbose and debug info --- lib/ansible/cli/__init__.py | 12 +++++++----- lib/ansible/cli/pull.py | 25 ++++++++++++++++--------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index da4d1b92d3..da1aabcc69 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -210,7 +210,7 @@ class CLI(object): @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False, - async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False): + async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, 
inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False): ''' create an options parser for most ansible scripts ''' # TODO: implement epilog parsing @@ -267,10 +267,6 @@ class CLI(object): if runas_opts: # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', - help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, @@ -287,6 +283,12 @@ class CLI(object): help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) parser.add_option('--become-user', default=None, dest='become_user', type='string', help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) + + if runas_opts or runas_prompt_opts: + parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password (deprecated, use become)') + parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', + help='ask for su password (deprecated, use become)') parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', help='ask for privilege escalation password') diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index b2e402126d..1543c704d5 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -64,10 +64,12 @@ class PullCLI(CLI): subset_opts=True, inventory_opts=True, 
module_opts=True, + runas_prompt_opts=True, ) # options unique to pull - self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run') + self.parser.add_option('--purge', default=False, action='store_true', + help='purge checkout after playbook run') self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', help='only run the playbook if the repository has been updated') self.parser.add_option('-s', '--sleep', dest='sleep', default=None, @@ -94,6 +96,7 @@ class PullCLI(CLI): hostname = socket.getfqdn() # use a hostname dependent directory, in case of $HOME on nfs self.options.dest = os.path.join('~/.ansible/pull', hostname) + self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest)) if self.options.sleep: try: @@ -126,7 +129,7 @@ class PullCLI(CLI): node = platform.node() host = socket.getfqdn() limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]])) - base_opts = '-c local "%s"' % limit_opts + base_opts = '-c local ' if self.options.verbosity > 0: base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ]) @@ -137,7 +140,7 @@ class PullCLI(CLI): else: inv_opts = self.options.inventory - #TODO: enable more repo modules hg/svn? + #FIXME: enable more repo modules hg/svn? 
if self.options.module_name == 'git': repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: @@ -157,8 +160,8 @@ class PullCLI(CLI): raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) - cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % ( - bin_path, inv_opts, base_opts, self.options.module_name, repo_opts + cmd = '%s/ansible -i "%s" %s -m %s -a "%s" "%s"' % ( + bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts ) for ev in self.options.extra_vars: @@ -170,6 +173,8 @@ class PullCLI(CLI): time.sleep(self.options.sleep) # RUN the Checkout command + display.debug("running ansible with VCS module to checkout repo") + display.vvvv('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if rc != 0: @@ -193,16 +198,18 @@ class PullCLI(CLI): cmd += ' -i "%s"' % self.options.inventory for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev - if self.options.ask_sudo_pass: - cmd += ' -K' + if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass: + cmd += ' --ask-become-pass' if self.options.tags: cmd += ' -t "%s"' % self.options.tags - if self.options.limit: - cmd += ' -l "%s"' % self.options.limit + if self.options.subset: + cmd += ' -l "%s"' % self.options.subset os.chdir(self.options.dest) # RUN THE PLAYBOOK COMMAND + display.debug("running ansible-playbook to do actual work") + display.debug('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if self.options.purge: From e385c91fa528cb5e835077331512307b231ba393 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 09:57:06 -0800 Subject: [PATCH 167/590] Update submodule refs# --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cd9a7667aa..191347676e 160000 --- 
a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cd9a7667aa39bbc1ccd606ebebaf3c62f228d601 +Subproject commit 191347676eea08817da3fb237f24cdbf2d16e307 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 3c4f954f0f..a10bdd6be9 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 3c4f954f0fece5dcb3241d6d5391273334206241 +Subproject commit a10bdd6be948d3aa5fad7ff4959908d6e78e0528 From 750adbaa270bca5a63f443808a7b8ddc2a026d9a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 4 Dec 2015 12:48:56 -0500 Subject: [PATCH 168/590] Changing up how host (in)equality is checked Fixes #13397 --- lib/ansible/inventory/dir.py | 2 +- lib/ansible/inventory/host.py | 2 +- test/units/inventory/test_host.py | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py index e4f7ee80f9..e716987fd5 100644 --- a/lib/ansible/inventory/dir.py +++ b/lib/ansible/inventory/dir.py @@ -205,7 +205,7 @@ class InventoryDirectory(object): # because the __eq__/__ne__ methods in Host() compare the # name fields rather than references, we use id() here to # do the object comparison for merges - if id(self.hosts[host.name]) != id(host): + if self.hosts[host.name] != host: # different object, merge self._merge_hosts(self.hosts[host.name], host) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index a561b951b4..a433463fa1 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -38,7 +38,7 @@ class Host: def __eq__(self, other): if not isinstance(other, Host): return False - return self.name == other.name + return id(self) == id(other) def __ne__(self, other): return not self.__eq__(other) diff --git a/test/units/inventory/test_host.py b/test/units/inventory/test_host.py index 078d4321b5..5c0945f7b4 100644 --- a/test/units/inventory/test_host.py +++ 
b/test/units/inventory/test_host.py @@ -29,9 +29,7 @@ class TestHost(unittest.TestCase): def test_equality(self): self.assertEqual(self.hostA, self.hostA) self.assertNotEqual(self.hostA, self.hostB) - self.assertEqual(self.hostA, Host('a')) - # __ne__ is a separate method - self.assertFalse(self.hostA != Host('a')) + self.assertNotEqual(self.hostA, Host('a')) def test_hashability(self): # equality implies the hash values are the same From 84507aedd4b4a4be48acf9657b90bb341c3bd1e2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 4 Dec 2015 13:33:27 -0500 Subject: [PATCH 169/590] Adding a uuid field so we can track host equality across serialization too --- lib/ansible/inventory/host.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index a433463fa1..6263dcbc80 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import uuid + from ansible.inventory.group import Group from ansible.utils.vars import combine_vars @@ -38,7 +40,7 @@ class Host: def __eq__(self, other): if not isinstance(other, Host): return False - return id(self) == id(other) + return self._uuid == other._uuid def __ne__(self, other): return not self.__eq__(other) @@ -55,6 +57,7 @@ class Host: name=self.name, vars=self.vars.copy(), address=self.address, + uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, ) @@ -65,6 +68,7 @@ class Host: self.name = data.get('name') self.vars = data.get('vars', dict()) self.address = data.get('address', '') + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -84,6 +88,7 @@ class Host: self.set_variable('ansible_port', int(port)) self._gathered_facts = False + self._uuid = uuid.uuid4() def __repr__(self): return self.get_name() From 0434644d12c64918d5182a7c0b0057687b1cdbc2 Mon 
Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 11:50:39 -0800 Subject: [PATCH 170/590] Transform exceptions into ansible messages via to_unicode instead of str to avoid tracebacks. Fixes #13385 --- lib/ansible/executor/task_executor.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 4a7d7464ef..5d7430fad2 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -146,7 +146,7 @@ class TaskExecutor: except AttributeError: pass except Exception as e: - display.debug("error closing connection: %s" % to_unicode(e)) + display.debug(u"error closing connection: %s" % to_unicode(e)) def _get_loop_items(self): ''' @@ -183,7 +183,7 @@ class TaskExecutor: loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: - if 'has no attribute' in str(e): + if u'has no attribute' in to_unicode(e): loop_terms = [] display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.") else: @@ -231,7 +231,7 @@ class TaskExecutor: tmp_task = self._task.copy() tmp_play_context = self._play_context.copy() except AnsibleParserError as e: - results.append(dict(failed=True, msg=str(e))) + results.append(dict(failed=True, msg=to_unicode(e))) continue # now we swap the internal task and play context with their copies, @@ -401,7 +401,7 @@ class TaskExecutor: try: result = self._handler.run(task_vars=variables) except AnsibleConnectionFailure as e: - return dict(unreachable=True, msg=str(e)) + return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") if self._task.async > 0: @@ -412,7 +412,7 @@ class TaskExecutor: return result result = json.loads(result.get('stdout')) except (TypeError, ValueError) as e: - return dict(failed=True, 
msg="The async task did not return valid JSON: %s" % str(e)) + return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e)) if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar) From e8954e556a6f36e0eaeb8160bc04171ed655c43f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CBrice?= Date: Fri, 4 Dec 2015 16:24:19 -0500 Subject: [PATCH 171/590] comment examples in default hosts file --- examples/hosts | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/examples/hosts b/examples/hosts index ce4cbb7caa..841f4bc650 100644 --- a/examples/hosts +++ b/examples/hosts @@ -10,35 +10,35 @@ # Ex 1: Ungrouped hosts, specify before any group headers. -green.example.com -blue.example.com -192.168.100.1 -192.168.100.10 +## green.example.com +## blue.example.com +## 192.168.100.1 +## 192.168.100.10 # Ex 2: A collection of hosts belonging to the 'webservers' group -[webservers] -alpha.example.org -beta.example.org -192.168.1.100 -192.168.1.110 +## [webservers] +## alpha.example.org +## beta.example.org +## 192.168.1.100 +## 192.168.1.110 # If you have multiple hosts following a pattern you can specify # them like this: -www[001:006].example.com +## www[001:006].example.com # Ex 3: A collection of database servers in the 'dbservers' group -[dbservers] - -db01.intranet.mydomain.net -db02.intranet.mydomain.net -10.25.1.56 -10.25.1.57 +## [dbservers] +## +## db01.intranet.mydomain.net +## db02.intranet.mydomain.net +## 10.25.1.56 +## 10.25.1.57 # Here's another example of host ranges, this time there are no # leading 0s: -db-[99:101]-node.example.com +## db-[99:101]-node.example.com From 1eb0a1ddf7cf2f9501ea48915307652e8ab55049 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 15:16:02 -0800 Subject: [PATCH 172/590] Correct VERSION in the devel branch --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 
879b416e60..7ec1d6db40 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1 +2.1.0 From a96a879fcf8c80ee37ff3898f729d7baeac1cd6f Mon Sep 17 00:00:00 2001 From: sam-at-github Date: Sat, 5 Dec 2015 13:06:58 +1100 Subject: [PATCH 173/590] Add fullstop to make sentence make sense. Touch paragraph while at it. --- docsite/rst/playbooks_variables.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 18f1e57f72..307387a72e 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -793,8 +793,8 @@ Basically, anything that goes into "role defaults" (the defaults folder inside t .. rubric:: Footnotes -.. [1] Tasks in each role will see their own role's defaults tasks outside of roles will the last role's defaults -.. [2] Variables defined in inventory file or provided by dynamic inventory +.. [1] Tasks in each role will see their own role's defaults. Tasks defined outside of a role will see the last role's defaults. +.. [2] Variables defined in inventory file or provided by dynamic inventory. .. note:: Within a any section, redefining a var will overwrite the previous instance. If multiple groups have the same variable, the last one loaded wins. From fa71c38c2a7332ed450464e9239aac6e6698b095 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 5 Dec 2015 01:47:35 -0500 Subject: [PATCH 174/590] updated pull location in changelog it was in between of backslash description and example --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9f8b4b76a..d246be1093 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,9 +37,9 @@ Ansible Changes By Release * New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. 
`ansible_ssh_extra_args` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings). +* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. * Backslashes used when specifying parameters in jinja2 expressions in YAML dicts sometimes needed to be escaped twice. This has been fixed so that escaping once works. Here's an example of how playbooks need to be modified: -* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. ``` # Syntax in 1.9.x From 0129fb0a44080d324d110c3d5c5223ab2aa138b2 Mon Sep 17 00:00:00 2001 From: Nils Steinger Date: Sat, 5 Dec 2015 15:28:37 +0100 Subject: [PATCH 175/590] Remove duplicates from host list *before* caching it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ansible previously added hosts to the host list multiple times for commands like `ansible -i 'localhost,' -c local -m ping 'localhost,localhost' --list-hosts`. 8d5f36a fixed the obvious error, but still added the un-deduplicated list to a cache, so all future invocations of get_hosts() would retrieve a non-deduplicated list. This caused problems down the line: For some reason, Ansible only ever schedules "flush_handlers" tasks (instead of scheduling any actual tasks from the playbook) for hosts that are contained in the host lists multiple times. This probably happens because the host states are stored in a dictionary indexed by the hostnames, so duplicate hostname would cause the state to be overwritten by subsequent invocations of … something. 
--- lib/ansible/inventory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 59a3c37bf9..14cd169265 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -195,8 +195,8 @@ class Inventory(object): if self._restriction is not None: hosts = [ h for h in hosts if h in self._restriction ] - HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:] - return list(set(hosts)) + HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) + return HOSTS_PATTERNS_CACHE[pattern_hash][:] @classmethod def split_host_pattern(cls, pattern): From a1f6d17e37b059aa9d34a004b0aed05a6b8fa3b3 Mon Sep 17 00:00:00 2001 From: Nils Steinger Date: Sat, 5 Dec 2015 15:40:49 +0100 Subject: [PATCH 176/590] More meaningful string representation for meta tasks (like 'noop' and 'flush_handlers') --- lib/ansible/playbook/task.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 4f326b628b..21dbc87bec 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -133,7 +133,10 @@ class Task(Base, Conditional, Taggable, Become): def __repr__(self): ''' returns a human readable representation of the task ''' - return "TASK: %s" % self.get_name() + if self.get_name() == 'meta ': + return "TASK: meta (%s)" % self.args['_raw_params'] + else: + return "TASK: %s" % self.get_name() def _preprocess_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' From f89f906f87c2c4d850702404f70cfabaa63be351 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 5 Dec 2015 10:10:25 -0500 Subject: [PATCH 177/590] simplified get_hosts code to have 1 return point --- lib/ansible/inventory/__init__.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 
14cd169265..d7d0f03fb1 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -178,24 +178,24 @@ class Inventory(object): if self._restriction: pattern_hash += u":%s" % to_unicode(self._restriction) - if pattern_hash in HOSTS_PATTERNS_CACHE: - return HOSTS_PATTERNS_CACHE[pattern_hash][:] + if pattern_hash not in HOSTS_PATTERNS_CACHE: - patterns = Inventory.split_host_pattern(pattern) - hosts = self._evaluate_patterns(patterns) + patterns = Inventory.split_host_pattern(pattern) + hosts = self._evaluate_patterns(patterns) - # mainly useful for hostvars[host] access - if not ignore_limits_and_restrictions: - # exclude hosts not in a subset, if defined - if self._subset: - subset = self._evaluate_patterns(self._subset) - hosts = [ h for h in hosts if h in subset ] + # mainly useful for hostvars[host] access + if not ignore_limits_and_restrictions: + # exclude hosts not in a subset, if defined + if self._subset: + subset = self._evaluate_patterns(self._subset) + hosts = [ h for h in hosts if h in subset ] - # exclude hosts mentioned in any restriction (ex: failed hosts) - if self._restriction is not None: - hosts = [ h for h in hosts if h in self._restriction ] + # exclude hosts mentioned in any restriction (ex: failed hosts) + if self._restriction is not None: + hosts = [ h for h in hosts if h in self._restriction ] + + HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) - HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) return HOSTS_PATTERNS_CACHE[pattern_hash][:] @classmethod From 8ea45e8608fc15e07493b11ce28fe3d3f38865b8 Mon Sep 17 00:00:00 2001 From: Luca Berruti Date: Sat, 5 Dec 2015 19:43:02 +0100 Subject: [PATCH 178/590] Make no_target_syslog consistent. 
no_target_syslog = False --> do log on target --- examples/ansible.cfg | 2 +- lib/ansible/constants.py | 2 +- lib/ansible/plugins/action/__init__.py | 2 +- lib/ansible/plugins/action/async.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 74aef7a024..87c089f45a 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -182,7 +182,7 @@ #no_log = False # prevents logging of tasks, but only on the targets, data is still logged on the master/controller -#no_target_syslog = True +#no_target_syslog = False # controls the compression level of variables sent to # worker processes. At the default of 0, no compression diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 08d522fcb6..6faae928db 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -159,7 +159,7 @@ DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', # disclosure DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True) -DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', True, boolean=True) +DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 64a3b51e5d..497143224a 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -382,7 +382,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): module_args['_ansible_check_mode'] = True # set no log in the module arguments, if required - if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG: + if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG: module_args['_ansible_no_log'] = True # set 
debug in the module arguments, if required diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 51e2413af2..8a7175aeb8 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -48,7 +48,7 @@ class ActionModule(ActionBase): env_string = self._compute_environment_string() module_args = self._task.args.copy() - if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG: + if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG: module_args['_ansible_no_log'] = True # configure, upload, and chmod the target module From 955710267c1992c5e3b5b9eb77f4c76e289e3313 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 5 Dec 2015 15:59:51 -0500 Subject: [PATCH 179/590] only set become defaults at last possible moment tasks were overriding commandline with their defaults, not with the explicit setting, removed the setting of defaults from task init and pushed down to play context at last possible moment. fixes #13362 --- lib/ansible/playbook/become.py | 16 +++++++++------- lib/ansible/playbook/play_context.py | 3 +++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index 643f2b555d..1e579751d4 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -90,16 +90,18 @@ class Become: display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)") - # if we are becoming someone else, but some fields are unset, - # make sure they're initialized to the default config values - if ds.get('become', False): - if ds.get('become_method', None) is None: - ds['become_method'] = C.DEFAULT_BECOME_METHOD - if ds.get('become_user', None) is None: - ds['become_user'] = C.DEFAULT_BECOME_USER return ds + def set_become_defaults(self, become, become_method, become_user): + ''' if we are becoming someone else, but some fields are unset, + make sure they're 
initialized to the default config values ''' if become: if become_method is None: become_method = C.DEFAULT_BECOME_METHOD if become_user is None: become_user = C.DEFAULT_BECOME_USER + def _get_attr_become(self): ''' Override for the 'become' getattr fetcher, used from Base. diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 5c02093980..9320a23ed9 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -392,6 +392,9 @@ class PlayContext(Base): if new_info.no_log is None: new_info.no_log = C.DEFAULT_NO_LOG + # set become defaults if not previously set + task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user) + return new_info def make_become_cmd(self, cmd, executable=None): From 41773630edcf8ab138a36290c4904c6ba537390b Mon Sep 17 00:00:00 2001 From: Peter Sprygada Date: Mon, 23 Nov 2015 22:01:27 -0500 Subject: [PATCH 180/590] adds new device argument to nxapi command arguments The device argument allows a dict of nxapi parameters to be passed to the module to simplify passing the nxapi parameters --- lib/ansible/module_utils/nxapi.py | 75 ++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/lib/ansible/module_utils/nxapi.py b/lib/ansible/module_utils/nxapi.py index 0589b9a50c..35bcc442fb 100644 --- a/lib/ansible/module_utils/nxapi.py +++ b/lib/ansible/module_utils/nxapi.py @@ -32,16 +32,16 @@ from ansible.module_utils.nxapi import * The nxapi module provides the following common argument spec: - * host (str) - [Required] The IPv4 address or FQDN of the network device + * host (str) - The IPv4 address or FQDN of the network device * port (str) - Overrides the default port to use for the HTTP/S connection. 
The default values are 80 for HTTP and 443 for HTTPS - * url_username (str) - [Required] The username to use to authenticate + * username (str) - The username to use to authenticate the HTTP/S connection. Aliases: username - * url_password (str) - [Required] The password to use to authenticate + * password (str) - The password to use to authenticate the HTTP/S connection. Aliases: password * use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS) @@ -51,6 +51,10 @@ The nxapi module provides the following common argument spec: device. Valid values in `cli_show`, `cli_show_ascii`, 'cli_conf` and `bash`. The default value is `cli_show_ascii` + * device (dict) - Used to send the entire set of connection parameters + as a dict object. This argument is mutually exclusive with the + host argument + In order to communicate with Cisco NXOS devices, the NXAPI feature must be enabled and configured on the device. @@ -58,34 +62,52 @@ must be enabled and configured on the device. NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash'] -def nxapi_argument_spec(spec=None): - """Creates an argument spec for working with NXAPI - """ - arg_spec = url_argument_spec() - arg_spec.update(dict( - host=dict(required=True), - port=dict(), - url_username=dict(required=True, aliases=['username']), - url_password=dict(required=True, aliases=['password']), - use_ssl=dict(default=False, type='bool'), - command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES) - )) - if spec: - arg_spec.update(spec) - return arg_spec +NXAPI_COMMON_ARGS = dict( + host=dict(), + port=dict(), + username=dict(), + password=dict(), + use_ssl=dict(default=False, type='bool'), + device=dict(), + command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES) +) -def nxapi_url(module): +def nxapi_module(**kwargs): + """Append the common args to the argument_spec + """ + spec = kwargs.get('argument_spec') or dict() + + argument_spec = url_argument_spec() + 
argument_spec.update(NXAPI_COMMON_ARGS) + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + + module = AnsibleModule(**kwargs) + + device = module.params.get('device') or dict() + for key, value in device.iteritems(): + if key in NXAPI_COMMON_ARGS: + module.params[key] = value + + params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + if key != 'device': + module.params[key] = value + + return module + +def nxapi_url(params): """Constructs a valid NXAPI url """ - if module.params['use_ssl']: + if params['use_ssl']: proto = 'https' else: proto = 'http' - host = module.params['host'] + host = params['host'] url = '{}://{}'.format(proto, host) - port = module.params['port'] - if module.params['port']: - url = '{}:{}'.format(url, module.params['port']) + if params['port']: + url = '{}:{}'.format(url, params['port']) url = '{}/ins'.format(url) return url @@ -109,7 +131,7 @@ def nxapi_body(commands, command_type, **kwargs): def nxapi_command(module, commands, command_type=None, **kwargs): """Sends the list of commands to the device over NXAPI """ - url = nxapi_url(module) + url = nxapi_url(module.params) command_type = command_type or module.params['command_type'] @@ -118,6 +140,9 @@ def nxapi_command(module, commands, command_type=None, **kwargs): headers = {'Content-Type': 'text/json'} + module.params['url_username'] = module.params['username'] + module.params['url_password'] = module.params['password'] + response, headers = fetch_url(module, url, data=data, headers=headers, method='POST') From a8e015cc22d248e965157605e30b810de280b0a4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 6 Dec 2015 22:12:48 -0800 Subject: [PATCH 181/590] Add representers so we can output yaml for all the types we read in from yaml --- lib/ansible/parsing/yaml/dumper.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git 
a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py index a51289b09b..a8a5015b8e 100644 --- a/lib/ansible/parsing/yaml/dumper.py +++ b/lib/ansible/parsing/yaml/dumper.py @@ -22,7 +22,7 @@ __metaclass__ = type import yaml from ansible.compat.six import PY3 -from ansible.parsing.yaml.objects import AnsibleUnicode +from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping from ansible.vars.hostvars import HostVars class AnsibleDumper(yaml.SafeDumper): @@ -50,3 +50,13 @@ AnsibleDumper.add_representer( represent_hostvars, ) +AnsibleDumper.add_representer( + AnsibleSequence, + yaml.representer.SafeRepresenter.represent_list, +) + +AnsibleDumper.add_representer( + AnsibleMapping, + yaml.representer.SafeRepresenter.represent_dict, +) + From 4d637e5780503448840a3e4ef824b8f72aa5112a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 6 Dec 2015 22:16:31 -0800 Subject: [PATCH 182/590] Use self.args when we parse arguments that way the arguments can be constructed manually --- lib/ansible/cli/adhoc.py | 2 +- lib/ansible/cli/doc.py | 2 +- lib/ansible/cli/galaxy.py | 2 +- lib/ansible/cli/playbook.py | 2 +- lib/ansible/cli/pull.py | 2 +- lib/ansible/cli/vault.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 25f29fc297..120b230211 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -70,7 +70,7 @@ class AdHocCLI(CLI): help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, default=C.DEFAULT_MODULE_NAME) - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) if len(self.args) != 1: raise AnsibleOptionsError("Missing target hosts") diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 4eef1dd5dd..a17164eb50 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -62,7 +62,7 @@ class DocCLI(CLI): self.parser.add_option("-s", 
"--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified module(s)') - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity def run(self): diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 31c21146fc..94c04614ac 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -113,7 +113,7 @@ class GalaxyCLI(CLI): help='Force overwriting an existing role') # get options, args and galaxy object - self.options, self.args =self.parser.parse_args() + self.options, self.args =self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity self.galaxy = Galaxy(self.options) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index fc81f96456..a9c0ed018d 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -72,7 +72,7 @@ class PlaybookCLI(CLI): parser.add_option('--start-at-task', dest='start_at_task', help="start the playbook at the task matching this name") - self.options, self.args = parser.parse_args() + self.options, self.args = parser.parse_args(self.args[1:]) self.parser = parser diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 1543c704d5..593d601e8d 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -90,7 +90,7 @@ class PullCLI(CLI): help='verify GPG signature of checked out commit, if it fails abort running the playbook.' 
' This needs the corresponding VCS module to support such an operation') - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) if not self.options.dest: hostname = socket.getfqdn() diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index ac148d4770..9908f17e57 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -69,7 +69,7 @@ class VaultCLI(CLI): elif self.action == "rekey": self.parser.set_usage("usage: %prog rekey [options] file_name") - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity can_output = ['encrypt', 'decrypt'] From 2c8eee956fb574ab0ef2ae362a2936f95a2d80cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Mon, 7 Dec 2015 09:25:37 +0100 Subject: [PATCH 183/590] Fix issue when var name is the same as content. See https://github.com/ansible/ansible/issues/13453 for more details. --- lib/ansible/plugins/action/debug.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index a024e28b01..1d8e28c7a4 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -45,8 +45,12 @@ class ActionModule(ActionBase): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results else: + # If var name is same as result, try to template it if results == self._task.args['var']: - results = "VARIABLE IS NOT DEFINED!" + try: + results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True) + except: + results = "VARIABLE IS NOT DEFINED!" 
                result[self._task.args['var']] = results
        else:
            result['msg'] = 'here we are'

From dcedfbe26c2aacc901fe5ef84b51103feb92990f Mon Sep 17 00:00:00 2001
From: Brian Coca 
Date: Mon, 7 Dec 2015 09:54:55 -0800
Subject: [PATCH 184/590] corrected usage of ec2.py's profile option

this was never introduced into ansible-playbook though the docs stated
otherwise. We still explain how to use the env var to get the same result.
---
 docsite/rst/intro_dynamic_inventory.rst | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst
index 1a2bd6f72c..5f491ebc2e 100644
--- a/docsite/rst/intro_dynamic_inventory.rst
+++ b/docsite/rst/intro_dynamic_inventory.rst
@@ -111,9 +111,8 @@ If you use boto profiles to manage multiple AWS accounts, you can pass ``--profi
     aws_access_key_id = 
     aws_secret_access_key = 
 
-You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, or run playbooks with: ``ansible-playbook -i 'ec2.py --profile prod' myplaybook.yml``.
-
-Alternatively, use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml``
+You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, this option is not supported by ``ansible-playbook`` though.
+But you can use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml``
 
 Since each region requires its own API call, if you are only using a small set of regions, feel free to edit ``ec2.ini`` and list only the regions you are interested in. There are other config options in ``ec2.ini`` including cache control, and destination variables.
 
From 97626475db9fab72c27a7904d8e745638a6dde1f Mon Sep 17 00:00:00 2001
From: Brian Coca 
Date: Mon, 7 Dec 2015 10:04:48 -0800
Subject: [PATCH 185/590] added new ec2_vpc_net_facts to 2.1 changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d246be1093..36886531bb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@ Ansible Changes By Release
 ## 2.1 TBD - ACTIVE DEVELOPMENT
 
 ####New Modules:
+* aws: ec2_vpc_net_facts
 * cloudstack: cs_volume
 
 ####New Filters:

From 9ae1dede0387c02b0f3772f168e94c99ce9f23a8 Mon Sep 17 00:00:00 2001
From: Brian Coca 
Date: Tue, 8 Dec 2015 06:36:04 -0800
Subject: [PATCH 186/590] adhoc does not load plugins by default

reimplemented feature from 1.x which kept additional callbacks from
polluting adhoc unless specifically asked for through configuration.
---
 lib/ansible/cli/adhoc.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
index 120b230211..912b07a5c7 100644
--- a/lib/ansible/cli/adhoc.py
+++ b/lib/ansible/cli/adhoc.py
@@ -163,6 +163,9 @@ class AdHocCLI(CLI):
         else:
             cb = 'minimal'
 
+        if not C.DEFAULT_LOAD_CALLBACK_PLUGINS:
+            C.DEFAULT_CALLBACK_WHITELIST = []
+
         if self.options.tree:
             C.DEFAULT_CALLBACK_WHITELIST.append('tree')
             C.TREE_DIR = self.options.tree

From 8d500215b68aafe49c0416867af3fc701addf602 Mon Sep 17 00:00:00 2001
From: Chris Meyers 
Date: Thu, 12 Nov 2015 16:15:42 -0500
Subject: [PATCH 187/590] trigger jenkins integration tests

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index cec8ccca97..2e1f15559d 100644
--- a/README.md
+++ b/README.md
@@ -55,3 +55,4 @@ Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.de
 Ansible is sponsored by [Ansible, Inc](http://ansible.com)
 
 
+

From 970d7cadb7f50e5f55b3aa1c12af130957f67204 Mon Sep 17 00:00:00 2001
From: David L Ballenger 
Date: Tue, 8 Dec 2015 07:11:02 -0800
Subject: [PATCH 188/590] Add ssh_host support
for MacOSX El Capitan. OS X El Capitan moved the /etc/ssh_* files into /etc/ssh/. This fix adds a distribution version check for Darwin to set the keydir appropriately on El Capitan and later. --- lib/ansible/module_utils/facts.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4120a51fb5..94a5a11f72 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -524,7 +524,10 @@ class Facts(object): keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519') if self.facts['system'] == 'Darwin': - keydir = '/etc' + if self.facts['distribution'] == 'MacOSX' and LooseVersion(self.facts['distribution_version']) >= LooseVersion('10.11') : + keydir = '/etc/ssh' + else: + keydir = '/etc' else: keydir = '/etc/ssh' From 9c4eae525306bf201304a15d36f531b0308cd25e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 11:55:35 -0500 Subject: [PATCH 189/590] Fix always_run support in the action plugin for template when copying Fixes #13418 --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 109f3e80c0..5edc4e8a2c 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -157,7 +157,7 @@ class ActionModule(ActionBase): if self._play_context.diff: diff = self._get_diff_data(dest, resultant, task_vars, source_file=False) - if not self._play_context.check_mode: # do actual work thorugh copy + if not self._play_context.check_mode or self._task.always_run: # do actual work thorugh copy xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user From 5cac8efd73ff39268d2bebc1f501e3ae662add9d Mon Sep 17 00:00:00 2001 From: Jeremy Audet Date: Tue, 8 Dec 2015 09:39:45 -0500 Subject: [PATCH 
190/590] Make "make webdocs" compatible with Python 3 The `webdocs` make target fails under Python 3. It fails due to a variety of syntax errors, such as the use of `except Foo, e` and `print 'foo'`. Fix #13463 by making code compatible with both Python 2 and 3. --- docsite/build-site.py | 23 ++++++++++++----------- hacking/module_formatter.py | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docsite/build-site.py b/docsite/build-site.py index 587a189f07..24f9fc9a64 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -15,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import print_function __docformat__ = 'restructuredtext' @@ -24,9 +25,9 @@ import traceback try: from sphinx.application import Sphinx except ImportError: - print "#################################" - print "Dependency missing: Python Sphinx" - print "#################################" + print("#################################") + print("Dependency missing: Python Sphinx") + print("#################################") sys.exit(1) import os @@ -40,7 +41,7 @@ class SphinxBuilder(object): """ Run the DocCommand. """ - print "Creating html documentation ..." + print("Creating html documentation ...") try: buildername = 'html' @@ -69,10 +70,10 @@ class SphinxBuilder(object): app.builder.build_all() - except ImportError, ie: + except ImportError: traceback.print_exc() - except Exception, ex: - print >> sys.stderr, "FAIL! exiting ... (%s)" % ex + except Exception as ex: + print("FAIL! exiting ... (%s)" % ex, file=sys.stderr) def build_docs(self): self.app.builder.build_all() @@ -83,9 +84,9 @@ def build_rst_docs(): if __name__ == '__main__': if '-h' in sys.argv or '--help' in sys.argv: - print "This script builds the html documentation from rst/asciidoc sources.\n" - print " Run 'make docs' to build everything." - print " Run 'make viewdocs' to build and then preview in a web browser." 
+ print("This script builds the html documentation from rst/asciidoc sources.\n") + print(" Run 'make docs' to build everything.") + print(" Run 'make viewdocs' to build and then preview in a web browser.") sys.exit(0) build_rst_docs() @@ -93,4 +94,4 @@ if __name__ == '__main__': if "view" in sys.argv: import webbrowser if not webbrowser.open('htmlout/index.html'): - print >> sys.stderr, "Could not open on your webbrowser." + print("Could not open on your webbrowser.", file=sys.stderr) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f4ab5d7d9a..4c94ca3f2c 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -140,7 +140,7 @@ def list_modules(module_dir, depth=0): if os.path.isdir(d): res = list_modules(d, depth + 1) - for key in res.keys(): + for key in list(res.keys()): if key in categories: categories[key] = merge_hash(categories[key], res[key]) res.pop(key, None) @@ -451,7 +451,7 @@ def main(): categories = list_modules(options.module_dir) last_category = None - category_names = categories.keys() + category_names = list(categories.keys()) category_names.sort() category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") From d4ccb0be59c86d8518ba4becaed5c7442d8758fc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 09:20:49 -0800 Subject: [PATCH 191/590] have always_run override check mode for a task Fixes #13418 --- lib/ansible/playbook/play_context.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 9320a23ed9..81223500ad 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -395,6 +395,10 @@ class PlayContext(Base): # set become defaults if not previouslly set task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user) + # have always_run override check mode + if task.always_run: + new_info.check_mode = False + return 
new_info def make_become_cmd(self, cmd, executable=None): From 7ffd578a9d38b80e71ef6df2219f7e887e2909b7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 09:24:20 -0800 Subject: [PATCH 192/590] Revert "Fix always_run support in the action plugin for template when copying" This reverts commit 9c4eae525306bf201304a15d36f531b0308cd25e. --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 5edc4e8a2c..109f3e80c0 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -157,7 +157,7 @@ class ActionModule(ActionBase): if self._play_context.diff: diff = self._get_diff_data(dest, resultant, task_vars, source_file=False) - if not self._play_context.check_mode or self._task.always_run: # do actual work thorugh copy + if not self._play_context.check_mode: # do actual work thorugh copy xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user From 05c8bb79f8158ca8a93d50bc798dd1bed02aaa89 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 8 Dec 2015 12:24:42 -0500 Subject: [PATCH 193/590] playbook that Ansible jenkins runs moved into core The playbook is already running in jenkins and works. This moves the assets into core for ease of maintenance going forward. 
--- .../ansible.cfg | 2 + .../ec2.yml | 41 ++++++++++ .../inventory | 1 + .../inventory.dynamic | 3 + .../main.yml | 62 ++++++++++++++ .../roles/ansible_deps/.gitignore | 1 + .../roles/ansible_deps/.travis.yml | 37 +++++++++ .../roles/ansible_deps/README.md | 8 ++ .../roles/ansible_deps/defaults/main.yml | 2 + .../roles/ansible_deps/handlers/main.yml | 2 + .../ansible_deps/meta/.galaxy_install_info | 1 + .../roles/ansible_deps/meta/main.yml | 23 ++++++ .../roles/ansible_deps/tasks/main.yml | 81 +++++++++++++++++++ .../roles/ansible_deps/test/inventory | 1 + .../roles/ansible_deps/test/main.yml | 29 +++++++ .../roles/ansible_deps/test/requirements.yml | 2 + .../roles/ansible_deps/vars/main.yml | 2 + .../roles/run_integration/tasks/main.yml | 20 +++++ 18 files changed, 318 insertions(+) create mode 100644 test/utils/ansible-playbook_integration_runner/ansible.cfg create mode 100644 test/utils/ansible-playbook_integration_runner/ec2.yml create mode 100644 test/utils/ansible-playbook_integration_runner/inventory create mode 100644 test/utils/ansible-playbook_integration_runner/inventory.dynamic create mode 100644 test/utils/ansible-playbook_integration_runner/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml create mode 
100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/ansible.cfg b/test/utils/ansible-playbook_integration_runner/ansible.cfg new file mode 100644 index 0000000000..14c8065152 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +host_key_checking = False diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml new file mode 100644 index 0000000000..59e15f0da1 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -0,0 +1,41 @@ +- name: Launch Instance + ec2: + group_id: 'sg-07bb906d' # jenkins-slave_new + count: 1 + instance_type: 'm3.medium' + image: '{{ item.image }}' + wait: true + region: 'us-east-1' + keypair: '{{ keypair }}' + aws_access_key: "{{ aws_access_key|default(lookup('env', 'AWS_ACCESS_KEY')) }}" + aws_secret_key: "{{ aws_secret_key|default(lookup('env', 'AWS_SECRET_KEY')) }}" + instance_tags: + jenkins: jenkins_ansible_pr_test + register: ec2 + with_items: slaves +# We could do an async here, that would speed things up + + +- name: Wait for SSH + wait_for: + host: "{{ item['instances'][0]['public_ip'] }}" + port: 22 + delay: 10 + timeout: 320 + state: started + with_items: ec2.results + +- name: Wait a little longer for centos + pause: seconds=20 + +- name: Add hosts group temporary inventory group with pem path + add_host: + name: "{{ item.1.platform }} {{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + 
groups: dynamic_hosts + ansible_ssh_host: "{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + ansible_ssh_private_key_file: '{{ pem_path }}' + ansible_ssh_user: "{{ item.1.ssh_user }}" + ec2_vars: "{{ ec2.results[item.0]['instances'][0] }}" + ec2_instance_ids: "{{ ec2.results[item.0]['instance_ids'] }}" + with_indexed_items: slaves + diff --git a/test/utils/ansible-playbook_integration_runner/inventory b/test/utils/ansible-playbook_integration_runner/inventory new file mode 100644 index 0000000000..42de3a1b5d --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/inventory @@ -0,0 +1 @@ +localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" diff --git a/test/utils/ansible-playbook_integration_runner/inventory.dynamic b/test/utils/ansible-playbook_integration_runner/inventory.dynamic new file mode 100644 index 0000000000..1aa03b4ed8 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/inventory.dynamic @@ -0,0 +1,3 @@ +localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" +[dynamic_hosts] +54.157.26.110 ansible_ssh_user=root ansible_ssh_private_key_file=/Users/meyers/Dropbox/.ssh/Ansible_chris_meyers.pem diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml new file mode 100644 index 0000000000..8661a6dba9 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -0,0 +1,62 @@ +- hosts: all + connection: local + vars: + slaves: + - distribution: "Ubuntu" + version: "12.04" + image: "ami-2ccc7a44" + ssh_user: "ubuntu" + platform: "ubuntu-12.04-x86_64" + - distribution: "Ubuntu" + version: "14.04" + image: "ami-9a562df2" + ssh_user: "ubuntu" + platform: "ubuntu-14.04-x86_64" + - distribution: "CentOS" + version: "6.5" + image: "ami-8997afe0" + ssh_user: "root" + platform: "centos-6.5-x86_64" + - distribution: "CentOS" + version: "7" + image: "ami-96a818fe" + ssh_user: "centos" + 
platform: "centos-7-x86_64" + + tasks: + - debug: var=ansible_version + - include: ec2.yml + when: groups['dynamic_hosts'] is not defined + +- hosts: dynamic_hosts + sudo: true + vars: + credentials_file: '' + test_flags: "" + make_target: "non_destructive" + #pre_tasks: + roles: + - { role: ansible_deps, tags: ansible_deps } + - { role: run_integration, + tags: run_integration, + run_integration_test_flags: "{{ test_flags }}", + run_integration_credentials_file: "{{ credentials_file }}", + run_integration_make_target: "{{ make_target }}", } + tasks: + + - name: Kill ec2 instances + sudo: false + local_action: + module: ec2 + state: absent + region: 'us-east-1' + instance_ids: "{{ hostvars[item]['ec2_instance_ids'] }}" + when: hostvars[item]['ec2_instance_ids'] is defined and item == inventory_hostname + with_items: groups['dynamic_hosts'] + + - set_fact: + ansible_connection: local + + - name: Fail + shell: 'echo "{{ inventory_hostname }}, Failed" && exit 1' + when: "test_results.rc != 0" diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore new file mode 100644 index 0000000000..1377554ebe --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml new file mode 100644 index 0000000000..2264f0b20a --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml @@ -0,0 +1,37 @@ +sudo: required +dist: trusty +language: python +python: + - "2.7" +services: + - docker +env: + global: + - PATH="/usr/bin:$PATH" + +before_install: + # Ansible doesn't play well with virtualenv + - deactivate + - sudo apt-get update -qq + - sudo apt-get install docker-engine + +install: + - sudo pip install docker-py 
+ # software-properties-common for ubuntu 14.04 + # python-software-properties for ubuntu 12.04 + - sudo apt-get install -y sshpass software-properties-common python-software-properties + - sudo apt-add-repository -y ppa:ansible/ansible + - sudo apt-get update -qq + - sudo apt-get install -y ansible + - sudo rm /usr/bin/python && sudo ln -s /usr/bin/python2.7 /usr/bin/python + - ansible-galaxy install -r test/requirements.yml -p test/roles/ + +script: + # Ensure any invocation of ansible-playbook (i.e. sudo) results in host_key_checking disabled + - sudo ansible all -i "127.0.0.1," -m lineinfile -a "regexp=^#host_key_checking dest=/etc/ansible/ansible.cfg line='host_key_checking = False'" -c local + - ansible-playbook -i test/inventory test/main.yml --syntax-check + - sudo ansible-playbook -i test/inventory test/main.yml + +notifications: + # notify ansible galaxy of results + webhooks: http://goo.gl/nSuq9h diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md new file mode 100644 index 0000000000..f0fc755863 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md @@ -0,0 +1,8 @@ +[![Build Status](https://travis-ci.org/chrismeyersfsu/role-ansible_deps.svg)](https://travis-ci.org/chrismeyersfsu/role-ansible_deps) + +ansible_deps +========= + +Install needed packages to run ansible integration tests. + +This role is periodically synced from ansible core repo to chrismeyersfsu/role-ansible_deps so that automated tests may run and so this role is accessible from galaxy. 
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml new file mode 100644 index 0000000000..c7837fc56b --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml new file mode 100644 index 0000000000..050cdd1234 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info new file mode 100644 index 0000000000..ffc298fff6 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info @@ -0,0 +1 @@ +{install_date: 'Tue Dec 8 15:06:28 2015', version: master} diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml new file mode 100644 index 0000000000..07c15d619e --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml @@ -0,0 +1,23 @@ +--- +galaxy_info: + author: Chris Meyers + description: install ansible integration test dependencies + company: Ansible + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Ubuntu + versions: + - precise + - trusty + galaxy_tags: + - testing + - integration + - ansible + - dependencies +dependencies: [] + diff --git 
a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml new file mode 100644 index 0000000000..f71128921d --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml @@ -0,0 +1,81 @@ +--- + +- name: Install sudo + yum: name=sudo state=installed + ignore_errors: true + when: ansible_os_family == 'RedHat' + +- name: Install sudo + apt: name=sudo state=installed + ignore_errors: true + when: ansible_os_family == 'Debian' + +- name: Install RH epel + yum: name="epel-release" state=installed + sudo: true + when: ansible_os_family == 'RedHat' + +- name: Install RH ansible dependencies + yum: name="{{ item }}" state=installed + sudo: true + with_items: + - python-pip + - python-httplib2 + - rsync + - subversion + - mercurial + - git + - rubygems + - unzip + - openssl + - make + - gcc + - python-devel + - libselinux-python + when: ansible_os_family == 'RedHat' + +- apt: update_cache=yes + when: ansible_os_family == 'Debian' + +- name: Install Debian ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - python-pip + - python-httplib2 + - rsync + - subversion + - mercurial + - git + - unzip + - python-dev + when: ansible_os_family == 'Debian' + +- name: Install ubuntu 12.04 ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - rubygems + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" + +- name: Install ubuntu 14.04 ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - rubygems-integration + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "14.04" + +- name: Install ansible pip deps + sudo: true + pip: name="{{ item }}" + with_items: + - PyYAML + - Jinja2 + - paramiko + +- name: Remove tty sudo 
requirement + sudo: true + lineinfile: "dest=/etc/sudoers regexp='^Defaults[ , ]*requiretty' line='#Defaults requiretty'" + when: ansible_os_family == 'RedHat' + + diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory new file mode 100644 index 0000000000..2302edae31 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml new file mode 100644 index 0000000000..95617dbfac --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml @@ -0,0 +1,29 @@ +--- +- name: Bring up docker containers + hosts: localhost + gather_facts: false + vars: + inventory: + - name: ansible_deps_host_1 + image: "chrismeyers/centos6" + - name: ansible_deps_host_2 + image: "chrismeyers/ubuntu12.04" + - name: ansible_deps_host_3 + image: "ubuntu-upstart:14.04" + roles: + - { role: provision_docker, provision_docker_company: 'ansible', provision_docker_inventory: "{{ inventory }}" } + +- name: Run ansible_deps Tests + hosts: docker_containers + vars: + git_dir: "/tmp/ansible" + roles: + - { role: ansible_deps } + tasks: + - name: Clone ansible + git: + repo: "https://github.com/ansible/ansible.git" + dest: "{{ git_dir }}" + - name: Invoke ansible in hacking mode + shell: "cd {{ git_dir }} && . 
hacking/env-setup && ansible --version && ansible-playbook --version" + diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml new file mode 100644 index 0000000000..fa10641a72 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml @@ -0,0 +1,2 @@ +- src: chrismeyersfsu.provision_docker + name: provision_docker diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml new file mode 100644 index 0000000000..a38c5fb042 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml new file mode 100644 index 0000000000..2114567d15 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Sync ansible repo to ec2 instance + synchronize: + src: "{{ sync_dir }}/" + dest: "~/ansible" + +- name: Get ansible source dir + sudo: false + shell: "cd ~ && pwd" + register: results + +- shell: ". 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + sudo: true + environment: + TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" + CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" + args: + chdir: "{{ results.stdout }}/ansible" + register: test_results + ignore_errors: true From 822624d061c55c5386e260b67d923627df3394fd Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 8 Dec 2015 14:05:57 -0500 Subject: [PATCH 194/590] rename role ansible_deps to ansible_test_deps --- .../roles/{ansible_deps => ansible_test_deps}/.gitignore | 0 .../roles/{ansible_deps => ansible_test_deps}/.travis.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/README.md | 0 .../roles/{ansible_deps => ansible_test_deps}/defaults/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/handlers/main.yml | 0 .../{ansible_deps => ansible_test_deps}/meta/.galaxy_install_info | 0 .../roles/{ansible_deps => ansible_test_deps}/meta/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/tasks/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/test/inventory | 0 .../roles/{ansible_deps => ansible_test_deps}/test/main.yml | 0 .../{ansible_deps => ansible_test_deps}/test/requirements.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/vars/main.yml | 0 12 files changed, 0 insertions(+), 0 deletions(-) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/.gitignore (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/.travis.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/README.md (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/defaults/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => 
ansible_test_deps}/handlers/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/meta/.galaxy_install_info (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/meta/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/tasks/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/inventory (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/requirements.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/vars/main.yml (100%) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md rename to 
test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml similarity index 100% rename from 
test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml From de690445bca1f47e773e43b6cd6f1ed0b2ec278b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 14:00:17 -0500 Subject: [PATCH 195/590] Make fact delegating configurable, defaulting to 1.x behavior --- 
lib/ansible/playbook/block.py | 1 + lib/ansible/playbook/role/__init__.py | 1 + lib/ansible/playbook/role/include.py | 3 ++- lib/ansible/playbook/task.py | 1 + lib/ansible/plugins/strategy/__init__.py | 2 +- 5 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 0de5e635e7..e842883bc8 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -34,6 +34,7 @@ class Block(Base, Become, Conditional, Taggable): _rescue = FieldAttribute(isa='list', default=[]) _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) # for future consideration? this would be functionally # similar to the 'else' clause for exceptions diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 3cb914689f..bd7760d221 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -61,6 +61,7 @@ def hash_params(params): class Role(Base, Become, Conditional, Taggable): _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) def __init__(self, play=None): self._role_name = None diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py index 67949e2e12..6e89eb3334 100644 --- a/lib/ansible/playbook/role/include.py +++ b/lib/ansible/playbook/role/include.py @@ -40,7 +40,8 @@ class RoleInclude(RoleDefinition): is included for execution in a play. 
""" - _delegate_to = FieldAttribute(isa='string') + _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 21dbc87bec..6c7730cb2a 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -72,6 +72,7 @@ class Task(Base, Conditional, Taggable, Become): _changed_when = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 0d0cc4a9dc..732a9293d2 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -289,7 +289,7 @@ class StrategyBase: # find the host we're actually refering too here, which may # be a host that is not really in inventory at all - if task.delegate_to is not None: + if task.delegate_to is not None and task.delegate_facts: task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) if item is not None: From 398f6bbb89ebdcd3ef0efdbc26d54801a0eb2e55 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 14:34:37 -0500 Subject: [PATCH 196/590] Fix typo from 5ae850c --- lib/ansible/playbook/block.py | 2 +- lib/ansible/playbook/role/__init__.py | 2 +- lib/ansible/playbook/role/include.py | 2 +- lib/ansible/playbook/task.py | 2 +- 4 files changed, 4 insertions(+), 4 
deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index e842883bc8..f2d9c82833 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -34,7 +34,7 @@ class Block(Base, Become, Conditional, Taggable): _rescue = FieldAttribute(isa='list', default=[]) _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) # for future consideration? this would be functionally # similar to the 'else' clause for exceptions diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index bd7760d221..1c6b344a4f 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -61,7 +61,7 @@ def hash_params(params): class Role(Base, Become, Conditional, Taggable): _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) def __init__(self, play=None): self._role_name = None diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py index 6e89eb3334..43e2d9e4fc 100644 --- a/lib/ansible/playbook/role/include.py +++ b/lib/ansible/playbook/role/include.py @@ -41,7 +41,7 @@ class RoleInclude(RoleDefinition): """ _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 6c7730cb2a..17f1952e39 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -72,7 +72,7 @@ 
class Task(Base, Conditional, Taggable, Become): _changed_when = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) From ec5827c22a1f238591c4c21413bf690ceb83aa1f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 11:52:59 -0800 Subject: [PATCH 197/590] updated with delegate_facts directive --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36886531bb..3d31ef4ebb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,7 +33,7 @@ Ansible Changes By Release by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. * Added `meta: refresh_inventory` to force rereading the inventory in a play. This re-executes inventory scripts, but does not force them to ignore any cache they might use. -* Now when you delegate an action that returns ansible_facts, these facts will be applied to the delegated host, unlike before when they were applied to the current host. +* New delegate_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory_hostname (no/false) which is the default and previous behaviour. * local connections now work with 'su' as a privilege escalation method * New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. 
From 795fac917ea5970fd9583a41dad7a6d33a626b75 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 11:59:04 -0800 Subject: [PATCH 198/590] fixed typo in tree callback, added default dir this would allow it to work with playbooks also --- lib/ansible/plugins/callback/tree.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py index 8b1118864e..b6ecd6de87 100644 --- a/lib/ansible/plugins/callback/tree.py +++ b/lib/ansible/plugins/callback/tree.py @@ -41,7 +41,8 @@ class CallbackModule(CallbackBase): self.tree = TREE_DIR if not self.tree: - self._display.warnings("Disabling tree callback, invalid directory provided to tree option: %s" % self.tree) + self.tree = os.path.expanduser("~/.ansible/tree") + self._display.warning("Defaulting to ~/.ansible/tree, invalid directory provided to tree option: %s" % self.tree) def write_tree_file(self, hostname, buf): ''' write something into treedir/hostname ''' @@ -53,7 +54,7 @@ class CallbackModule(CallbackBase): with open(path, 'wb+') as fd: fd.write(buf) except (OSError, IOError) as e: - self._display.warnings("Unable to write to %s's file: %s" % (hostname, str(e))) + self._display.warning("Unable to write to %s's file: %s" % (hostname, str(e))) def result_to_tree(self, result): if self.tree: From 1799de8528926355f51f79f705a6927a05ba018a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 15:02:25 -0500 Subject: [PATCH 199/590] Preserve original token when appending to _raw_params in parse_kv Fixes #13311 --- lib/ansible/parsing/splitter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index c506603acb..f24d8ecf9d 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -65,8 +65,8 @@ def parse_kv(args, check_raw=False): raise raw_params = [] - for x in vargs: - x = _decode_escapes(x) + 
for orig_x in vargs: + x = _decode_escapes(orig_x) if "=" in x: pos = 0 try: @@ -90,7 +90,7 @@ def parse_kv(args, check_raw=False): else: options[k.strip()] = unquote(v.strip()) else: - raw_params.append(x) + raw_params.append(orig_x) # recombine the free-form params, if any were found, and assign # them to a special option for use later by the shell/command module From 0e55398e16de1ca99dbe2115a4809c57cdbb5150 Mon Sep 17 00:00:00 2001 From: Jeremy Audet Date: Tue, 8 Dec 2015 09:39:45 -0500 Subject: [PATCH 200/590] Make "make webdocs" compatible with Python 3 The `webdocs` make target fails under Python 3. It fails due to a variety of syntax errors, such as the use of `except Foo, e` and `print 'foo'`. Fix #13463 by making code compatible with both Python 2 and 3. --- docsite/build-site.py | 23 ++++++++++++----------- hacking/module_formatter.py | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docsite/build-site.py b/docsite/build-site.py index 587a189f07..24f9fc9a64 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -15,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import print_function __docformat__ = 'restructuredtext' @@ -24,9 +25,9 @@ import traceback try: from sphinx.application import Sphinx except ImportError: - print "#################################" - print "Dependency missing: Python Sphinx" - print "#################################" + print("#################################") + print("Dependency missing: Python Sphinx") + print("#################################") sys.exit(1) import os @@ -40,7 +41,7 @@ class SphinxBuilder(object): """ Run the DocCommand. """ - print "Creating html documentation ..." 
+ print("Creating html documentation ...") try: buildername = 'html' @@ -69,10 +70,10 @@ class SphinxBuilder(object): app.builder.build_all() - except ImportError, ie: + except ImportError: traceback.print_exc() - except Exception, ex: - print >> sys.stderr, "FAIL! exiting ... (%s)" % ex + except Exception as ex: + print("FAIL! exiting ... (%s)" % ex, file=sys.stderr) def build_docs(self): self.app.builder.build_all() @@ -83,9 +84,9 @@ def build_rst_docs(): if __name__ == '__main__': if '-h' in sys.argv or '--help' in sys.argv: - print "This script builds the html documentation from rst/asciidoc sources.\n" - print " Run 'make docs' to build everything." - print " Run 'make viewdocs' to build and then preview in a web browser." + print("This script builds the html documentation from rst/asciidoc sources.\n") + print(" Run 'make docs' to build everything.") + print(" Run 'make viewdocs' to build and then preview in a web browser.") sys.exit(0) build_rst_docs() @@ -93,4 +94,4 @@ if __name__ == '__main__': if "view" in sys.argv: import webbrowser if not webbrowser.open('htmlout/index.html'): - print >> sys.stderr, "Could not open on your webbrowser." 
+ print("Could not open on your webbrowser.", file=sys.stderr) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f4ab5d7d9a..4c94ca3f2c 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -140,7 +140,7 @@ def list_modules(module_dir, depth=0): if os.path.isdir(d): res = list_modules(d, depth + 1) - for key in res.keys(): + for key in list(res.keys()): if key in categories: categories[key] = merge_hash(categories[key], res[key]) res.pop(key, None) @@ -451,7 +451,7 @@ def main(): categories = list_modules(options.module_dir) last_category = None - category_names = categories.keys() + category_names = list(categories.keys()) category_names.sort() category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") From 021605a19578309cccc5cdec8c47c512b819d7e0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Nov 2015 18:42:39 -0800 Subject: [PATCH 201/590] keep string type filters as strings now we don't try to convert types if using a filter that outputs a specifically formated string made list of filters configurable --- lib/ansible/constants.py | 1 + lib/ansible/template/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6faae928db..0f809db729 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -261,6 +261,7 @@ GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" +STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bdd0612bdd..8ce2358eb1 100644 --- 
a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -164,7 +164,8 @@ class Templar: self.block_end = self.environment.block_end_string self.variable_start = self.environment.variable_start_string self.variable_end = self.environment.variable_end_string - self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1])) + self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (self.variable_start, self.block_start, self.block_end, self.variable_end)) + self._no_type_regex = re.compile(r'.*\|(?:%s)\s*(?:%s)?$' % ('|'.join(C.STRING_TYPE_FILTERS), self.variable_end)) def _get_filters(self): ''' @@ -278,8 +279,7 @@ class Templar: if fail_on_undefined is None: fail_on_undefined = self._fail_on_undefined_errors - # Don't template unsafe variables, instead drop them back down to - # their constituent type. + # Don't template unsafe variables, instead drop them back down to their constituent type. 
if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): return self._clean_data(text_type(variable)) @@ -294,6 +294,7 @@ class Templar: if isinstance(variable, string_types): result = variable + if self._contains_vars(variable): # Check to see if the string we are trying to render is just referencing a single @@ -319,7 +320,7 @@ class Templar: result = self._cached_result[sha1_hash] else: result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides) - if convert_data: + if convert_data and not self._no_type_regex.match(variable): # if this looks like a dictionary or list, convert it to such using the safe_eval method if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \ result.startswith("[") or result in ("True", "False"): From d82d65ee7bd2506e06ffb225a2e9be6fa1ac36db Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Nov 2015 18:42:39 -0800 Subject: [PATCH 202/590] keep string type filters as strings now we don't try to convert types if using a filter that outputs a specifically formated string made list of filters configurable --- lib/ansible/constants.py | 1 + lib/ansible/template/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6faae928db..0f809db729 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -261,6 +261,7 @@ GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" +STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] diff 
--git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bdd0612bdd..8ce2358eb1 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -164,7 +164,8 @@ class Templar: self.block_end = self.environment.block_end_string self.variable_start = self.environment.variable_start_string self.variable_end = self.environment.variable_end_string - self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1])) + self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (self.variable_start, self.block_start, self.block_end, self.variable_end)) + self._no_type_regex = re.compile(r'.*\|(?:%s)\s*(?:%s)?$' % ('|'.join(C.STRING_TYPE_FILTERS), self.variable_end)) def _get_filters(self): ''' @@ -278,8 +279,7 @@ class Templar: if fail_on_undefined is None: fail_on_undefined = self._fail_on_undefined_errors - # Don't template unsafe variables, instead drop them back down to - # their constituent type. + # Don't template unsafe variables, instead drop them back down to their constituent type. 
if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): return self._clean_data(text_type(variable)) @@ -294,6 +294,7 @@ class Templar: if isinstance(variable, string_types): result = variable + if self._contains_vars(variable): # Check to see if the string we are trying to render is just referencing a single @@ -319,7 +320,7 @@ class Templar: result = self._cached_result[sha1_hash] else: result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides) - if convert_data: + if convert_data and not self._no_type_regex.match(variable): # if this looks like a dictionary or list, convert it to such using the safe_eval method if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \ result.startswith("[") or result in ("True", "False"): From c1cec64aa8372f2e7d565a2717c68a075836ae9b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 14:18:11 -0800 Subject: [PATCH 203/590] added delegate_facts docs --- docsite/rst/playbooks_delegation.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index 4411e4aa29..4e2e8c372a 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -130,6 +130,29 @@ Here is an example:: Note that you must have passphrase-less SSH keys or an ssh-agent configured for this to work, otherwise rsync will need to ask for a passphrase. +.. _delegate_facts: + +Delegated facts +``````````````` + +.. versionadded:: 2.0 + +Before 2.0 any facts gathered by a delegated task were assigned to the `inventory_hostname` (current host) instead of the host which actually produced the facts (delegated to host). 
+The new directive `delegate_facts` if set to `True` will assign the task's gathered facts to the delegated host instead of the current one.:: + + + - hosts: app_servers + tasks: + - name: gather facts from db servers + setup: + delegate_to: "{{item}}" + delegate_facts: True + with_items: "{{groups['dbservers']}}" + +The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers, +that way you can look up `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. + + .. _run_once: Run Once From 57391f49ba5e7692e50e4e43ed9c541511eb0936 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 9 Dec 2015 07:52:43 -0500 Subject: [PATCH 204/590] removed ansible_python_interpreter * added missed renames of ansible_deps to ansible_test_deps * removed accidental inventory.dynamic file * modified README for ansible_test_deps role --- .../ansible-playbook_integration_runner/inventory | 2 +- .../inventory.dynamic | 3 --- .../utils/ansible-playbook_integration_runner/main.yml | 2 +- .../roles/ansible_test_deps/README.md | 6 ++---- .../roles/ansible_test_deps/test/main.yml | 10 +++++----- 5 files changed, 9 insertions(+), 14 deletions(-) delete mode 100644 test/utils/ansible-playbook_integration_runner/inventory.dynamic diff --git a/test/utils/ansible-playbook_integration_runner/inventory b/test/utils/ansible-playbook_integration_runner/inventory index 42de3a1b5d..2302edae31 100644 --- a/test/utils/ansible-playbook_integration_runner/inventory +++ b/test/utils/ansible-playbook_integration_runner/inventory @@ -1 +1 @@ -localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" +localhost ansible_connection=local diff --git a/test/utils/ansible-playbook_integration_runner/inventory.dynamic b/test/utils/ansible-playbook_integration_runner/inventory.dynamic deleted file mode 100644 index 1aa03b4ed8..0000000000 --- 
a/test/utils/ansible-playbook_integration_runner/inventory.dynamic +++ /dev/null @@ -1,3 +0,0 @@ -localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" -[dynamic_hosts] -54.157.26.110 ansible_ssh_user=root ansible_ssh_private_key_file=/Users/meyers/Dropbox/.ssh/Ansible_chris_meyers.pem diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 8661a6dba9..5d15541490 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -36,7 +36,7 @@ make_target: "non_destructive" #pre_tasks: roles: - - { role: ansible_deps, tags: ansible_deps } + - { role: ansible_test_deps, tags: ansible_test_deps } - { role: run_integration, tags: run_integration, run_integration_test_flags: "{{ test_flags }}", diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md index f0fc755863..09ffacacaf 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md @@ -1,8 +1,6 @@ -[![Build Status](https://travis-ci.org/chrismeyersfsu/role-ansible_deps.svg)](https://travis-ci.org/chrismeyersfsu/role-ansible_deps) +[![Build Status](https://travis-ci.org/chrismeyersfsu/ansible_test_deps.svg)](https://travis-ci.org/chrismeyersfsu/ansible_test_deps) -ansible_deps +ansible_test_deps ========= Install needed packages to run ansible integration tests. - -This role is periodically synced from ansible core repo to chrismeyersfsu/role-ansible_deps so that automated tests may run and so this role is accessible from galaxy. 
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml index 95617dbfac..b66d699d5d 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml @@ -4,21 +4,21 @@ gather_facts: false vars: inventory: - - name: ansible_deps_host_1 + - name: ansible_test_deps_host_1 image: "chrismeyers/centos6" - - name: ansible_deps_host_2 + - name: ansible_test_deps_host_2 image: "chrismeyers/ubuntu12.04" - - name: ansible_deps_host_3 + - name: ansible_test_deps_host_3 image: "ubuntu-upstart:14.04" roles: - { role: provision_docker, provision_docker_company: 'ansible', provision_docker_inventory: "{{ inventory }}" } -- name: Run ansible_deps Tests +- name: Run ansible_test_deps Tests hosts: docker_containers vars: git_dir: "/tmp/ansible" roles: - - { role: ansible_deps } + - { role: ansible_test_deps } tasks: - name: Clone ansible git: From f16628ffecfa5ece0535c9b1c3de78cc78e18575 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 9 Dec 2015 09:37:39 -0500 Subject: [PATCH 205/590] symbolic link role for testing --- .../roles/ansible_test_deps/test/roles/ansible_test_deps | 1 + 1 file changed, 1 insertion(+) create mode 120000 test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps new file mode 120000 index 0000000000..eb6d9edda4 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps @@ -0,0 +1 @@ +../../../ansible_test_deps \ No newline at end of file From 8d66dcda21f176ee7cce21e99f52dea384ef42b8 Mon Sep 
17 00:00:00 2001 From: Chris Meyers Date: Wed, 9 Dec 2015 09:39:45 -0500 Subject: [PATCH 206/590] remove .gitignore --- .../roles/ansible_test_deps/.gitignore | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore deleted file mode 100644 index 1377554ebe..0000000000 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.swp From 0719eb3e2d798c6f80223e37dd77bc0ac41c537d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 06:32:04 -0800 Subject: [PATCH 207/590] clarified warning from tree callback --- lib/ansible/plugins/callback/tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py index b6ecd6de87..ee710a6dfd 100644 --- a/lib/ansible/plugins/callback/tree.py +++ b/lib/ansible/plugins/callback/tree.py @@ -42,7 +42,7 @@ class CallbackModule(CallbackBase): self.tree = TREE_DIR if not self.tree: self.tree = os.path.expanduser("~/.ansible/tree") - self._display.warning("Defaulting to ~/.ansible/tree, invalid directory provided to tree option: %s" % self.tree) + self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree) def write_tree_file(self, hostname, buf): ''' write something into treedir/hostname ''' From 87969868d42cd8aba1c65c8207d059d73407373b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 07:21:00 -0800 Subject: [PATCH 208/590] avoid persistent containers in attribute defaults moved from the field attribute declaration and created a placeholder which then is resolved in the field attribute class. 
this is to avoid unwanted persistent of the defaults across objects which introduces stealth bugs when multiple objects of the same kind are used in succession while not overriding the default values. --- lib/ansible/playbook/attribute.py | 11 +++++++++++ lib/ansible/playbook/block.py | 6 +++--- lib/ansible/playbook/conditional.py | 2 +- lib/ansible/playbook/play.py | 16 ++++++++-------- lib/ansible/playbook/play_context.py | 4 ++-- lib/ansible/playbook/playbook_include.py | 2 +- lib/ansible/playbook/role/metadata.py | 2 +- lib/ansible/playbook/taggable.py | 2 +- lib/ansible/playbook/task.py | 2 +- 9 files changed, 29 insertions(+), 18 deletions(-) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index 703d9dbca1..ce7ed6d8fe 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -32,6 +32,17 @@ class Attribute: self.priority = priority self.always_post_validate = always_post_validate + # This is here to avoid `default=` unwanted persistence across object instances + # We cannot rely on None as some fields use it to skip the code + # that would detect an empty container as a user error + if self.default == '_ansible_container': + if self.isa == 'list': + self.default = [] + elif self.isa == 'dict': + self.default = {} + elif self.isa == 'set': + self.default = set() + def __eq__(self, other): return other.priority == self.priority diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index f2d9c82833..66009b028a 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -30,9 +30,9 @@ from ansible.playbook.taggable import Taggable class Block(Base, Become, Conditional, Taggable): - _block = FieldAttribute(isa='list', default=[]) - _rescue = FieldAttribute(isa='list', default=[]) - _always = FieldAttribute(isa='list', default=[]) + _block = FieldAttribute(isa='list', default='_ansible_container') + _rescue = FieldAttribute(isa='list', 
default='_ansible_container') + _always = FieldAttribute(isa='list', default='_ansible_container') _delegate_to = FieldAttribute(isa='list') _delegate_facts = FieldAttribute(isa='bool', default=False) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index fc178e2fa1..a5b3ca725f 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -33,7 +33,7 @@ class Conditional: to be run conditionally when a condition is met or skipped. ''' - _when = FieldAttribute(isa='list', default=[]) + _when = FieldAttribute(isa='list', default='_ansible_container') def __init__(self, loader=None): # when used directly, this class needs a loader, but we want to diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ed61416e95..e08c8c6001 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,22 +64,22 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', default='_ansible_container', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes - _vars_files = FieldAttribute(isa='list', default=[], priority=99) - _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True) + _vars_files = FieldAttribute(isa='list', default='_ansible_container', priority=99) + _vars_prompt = FieldAttribute(isa='list', default='_ansible_container', always_post_validate=True) _vault_password = FieldAttribute(isa='string', always_post_validate=True) # Role Attributes - _roles = FieldAttribute(isa='list', default=[], priority=90) + _roles = FieldAttribute(isa='list', default='_ansible_container', priority=90) # Block (Task) Lists 
Attributes - _handlers = FieldAttribute(isa='list', default=[]) - _pre_tasks = FieldAttribute(isa='list', default=[]) - _post_tasks = FieldAttribute(isa='list', default=[]) - _tasks = FieldAttribute(isa='list', default=[]) + _handlers = FieldAttribute(isa='list', default='_ansible_container') + _pre_tasks = FieldAttribute(isa='list', default='_ansible_container') + _post_tasks = FieldAttribute(isa='list', default='_ansible_container') + _tasks = FieldAttribute(isa='list', default='_ansible_container') # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False, always_post_validate=True) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 81223500ad..da291c3c83 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -171,8 +171,8 @@ class PlayContext(Base): # general flags _verbosity = FieldAttribute(isa='int', default=0) - _only_tags = FieldAttribute(isa='set', default=set()) - _skip_tags = FieldAttribute(isa='set', default=set()) + _only_tags = FieldAttribute(isa='set', default='_ansible_container') + _skip_tags = FieldAttribute(isa='set', default='_ansible_container') _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index d9af2ba523..52081c4153 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -35,7 +35,7 @@ class PlaybookInclude(Base, Conditional, Taggable): _name = FieldAttribute(isa='string') _include = FieldAttribute(isa='string') - _vars = FieldAttribute(isa='dict', default=dict()) + _vars = FieldAttribute(isa='dict', default='_ansible_container') @staticmethod def load(data, basedir, variable_manager=None, loader=None): diff --git a/lib/ansible/playbook/role/metadata.py 
b/lib/ansible/playbook/role/metadata.py index 58b59145a1..4bb7d0ce02 100644 --- a/lib/ansible/playbook/role/metadata.py +++ b/lib/ansible/playbook/role/metadata.py @@ -40,7 +40,7 @@ class RoleMetadata(Base): ''' _allow_duplicates = FieldAttribute(isa='bool', default=False) - _dependencies = FieldAttribute(isa='list', default=[]) + _dependencies = FieldAttribute(isa='list', default='_ansible_container') _galaxy_info = FieldAttribute(isa='GalaxyInfo') def __init__(self, owner=None): diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 8f5cfa0934..37e3261e80 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -29,7 +29,7 @@ from ansible.template import Templar class Taggable: untagged = frozenset(['untagged']) - _tags = FieldAttribute(isa='list', default=[], listof=(string_types,int)) + _tags = FieldAttribute(isa='list', default='_ansible_container', listof=(string_types,int)) def __init__(self): super(Taggable, self).__init__() diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 17f1952e39..53a9a3c393 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -64,7 +64,7 @@ class Task(Base, Conditional, Taggable, Become): # will be used if defined # might be possible to define others - _args = FieldAttribute(isa='dict', default=dict()) + _args = FieldAttribute(isa='dict', default='_ansible_container') _action = FieldAttribute(isa='string') _any_errors_fatal = FieldAttribute(isa='bool') From 4f84769a17bb92894ee31b08267cf9aec1c0118c Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 10:51:12 -0500 Subject: [PATCH 209/590] Galaxy 2.0 --- docsite/rst/galaxy.rst | 291 ++++++++++++++++- lib/ansible/cli/galaxy.py | 322 +++++++++++++++++-- lib/ansible/constants.py | 3 +- lib/ansible/galaxy/__init__.py | 2 + lib/ansible/galaxy/api.py | 205 ++++++++---- lib/ansible/galaxy/data/metadata_template.j2 | 14 + lib/ansible/galaxy/data/test_playbook.j2 | 
5 + lib/ansible/galaxy/data/travis.j2 | 29 ++ lib/ansible/galaxy/login.py | 113 +++++++ lib/ansible/galaxy/role.py | 10 +- lib/ansible/galaxy/token.py | 67 ++++ 11 files changed, 949 insertions(+), 112 deletions(-) create mode 100644 lib/ansible/galaxy/data/test_playbook.j2 create mode 100644 lib/ansible/galaxy/data/travis.j2 create mode 100644 lib/ansible/galaxy/login.py create mode 100644 lib/ansible/galaxy/token.py diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 1b9475c418..783ac15e45 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -8,7 +8,7 @@ Ansible Galaxy The Website ``````````` -The website `Ansible Galaxy `_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects. +The website `Ansible Galaxy `_, is a free site for finding, downloading, and sharing community developed Ansible roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects. You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. @@ -24,7 +24,7 @@ Installing Roles The most obvious is downloading roles from the Ansible Galaxy website:: - ansible-galaxy install username.rolename + $ ansible-galaxy install username.rolename .. _galaxy_cli_roles_path: @@ -33,23 +33,16 @@ roles_path You can specify a particular directory where you want the downloaded roles to be placed:: - ansible-galaxy install username.role -p ~/Code/ansible_roles/ + $ ansible-galaxy install username.role -p ~/Code/ansible_roles/ This can be useful if you have a master folder that contains ansible galaxy roles shared across several projects. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured). 
-Building out Role Scaffolding ------------------------------ - -It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: - - ansible-galaxy init rolename - Installing Multiple Roles From A File -------------------------------------- +===================================== To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website:: - ansible-galaxy install -r requirements.txt + $ ansible-galaxy install -r requirements.txt Where the requirements.txt looks like:: @@ -64,7 +57,7 @@ To request specific versions (tags) of a role, use this syntax in the roles file Available versions will be listed on the Ansible Galaxy webpage for that role. Advanced Control over Role Requirements Files ---------------------------------------------- +============================================= For more advanced control over where to download roles from, including support for remote repositories, Ansible 1.8 and later support a new YAML format for the role requirements file, which must end in a 'yml' extension. It works like this:: @@ -121,3 +114,275 @@ Roles pulled from galaxy work as with other SCM sourced roles above. To download `irc.freenode.net `_ #ansible IRC chat channel +Building Role Scaffolding +------------------------- + +Use the init command to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: + + $ ansible-galaxy init rolename + +The above will create the following directory structure in the current working directory: + +:: + + README.md + .travsis.yml + defaults/ + main.yml + files/ + handlers/ + main.yml + meta/ + main.yml + templates/ + tests/ + inventory + test.yml + vars/ + main.yml + +.. 
note:: + + .travis.yml and tests/ are new in Ansible 2.0 + +If a directory matching the name of the role already exists in the current working directory, the init command will result in an error. To ignore the error use the --force option. Force will create the above subdirectories and files, replacing anything that matches. + +Search for Roles +---------------- + +The search command provides for querying the Galaxy database, allowing for searching by tags, platforms, author and multiple keywords. For example: + +:: + + $ ansible-galaxy search elasticsearch --author geerlingguy + +The search command will return a list of the first 1000 results matching your search: + +:: + + Found 2 roles matching your search: + + Name Description + ---- ----------- + geerlingguy.elasticsearch Elasticsearch for Linux. + geerlingguy.elasticsearch-curator Elasticsearch curator for Linux. + +.. note:: + + The format of results pictured here is new in Ansible 2.0. + +Get More Information About a Role +--------------------------------- + +Use the info command To view more detail about a specific role: + +:: + + $ ansible-galaxy info username.role_name + +This returns everything found in Galaxy for the role: + +:: + + Role: username.rolename + description: Installs and configures a thing, a distributed, highly available NoSQL thing. + active: True + commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57 + commit_message: Adding travis + commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab + company: My Company, Inc. 
+ created: 2015-12-08T14:17:52.773Z + download_count: 1 + forks_count: 0 + github_branch: + github_repo: repo_name + github_user: username + id: 6381 + is_valid: True + issue_tracker_url: + license: Apache + min_ansible_version: 1.4 + modified: 2015-12-08T18:43:49.085Z + namespace: username + open_issues_count: 0 + path: /Users/username/projects/roles + scm: None + src: username.repo_name + stargazers_count: 0 + travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=master + version: + watchers_count: 1 + +.. note:: + + The format of results pictured here is new in Ansible 2.0. + + +List Installed Roles +-------------------- + +The list command shows the name and version of each role installed in roles_path. + +:: + + $ ansible-galaxy list + + - chouseknecht.role-install_mongod, master + - chouseknecht.test-role-1, v1.0.2 + - chrismeyersfsu.role-iptables, master + - chrismeyersfsu.role-required_vars, master + +Remove an Installed Role +------------------------ + +The remove command will delete a role from roles_path: + +:: + + $ ansible-galaxy remove username.rolename + +Authenticate with Galaxy +------------------------ + +To use the import, delete and setup commands authentication with Galaxy is required. The login command will authenticate the user,retrieve a token from Galaxy, and store it in the user's home directory. + +:: + + $ ansible-galaxy login + + We need your Github login to identify you. + This information will not be sent to Galaxy, only to api.github.com. + The password will not be displayed. + + Use --github-token if you do not want to enter your password. + + Github Username: dsmith + Password for dsmith: + Succesfully logged into Galaxy as dsmith + +As depicted above, the login command prompts for a GitHub username and password. It does NOT send your password to Galaxy. It actually authenticates with GitHub and creates a personal access token. 
It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token. After authentication completes the GitHub personal access token is destroyed. + +If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. + +Import a Role +------------- + +Roles can be imported using ansible-galaxy. The import command expects that the user previously authenticated with Galaxy using the login command. + +Import any GitHub repo you have access to: + +:: + + $ ansible-galaxy import github_user github_repo + +By default the command will wait for the role to be imported by Galaxy, displaying the results as the import progresses: + +:: + + Successfully submitted import request 41 + Starting import 41: role_name=myrole repo=githubuser/ansible-role-repo ref= + Retrieving Github repo githubuser/ansible-role-repo + Accessing branch: master + Parsing and validating meta/main.yml + Parsing galaxy_tags + Parsing platforms + Adding dependencies + Parsing and validating README.md + Adding repo tags as role versions + Import completed + Status SUCCESS : warnings=0 errors=0 + +Use the --branch option to import a specific branch. If not specified, the default branch for the repo will be used. + +If the --no-wait option is present, the command will not wait for results. Results of the most recent import for any of your roles is available on the Galaxy web site under My Imports. + +.. note:: + + The import command is only available in Ansible 2.0. + +Delete a Role +------------- + +Remove a role from the Galaxy web site using the delete command. You can delete any role that you have access to in GitHub. The delete command expects that the user previously authenticated with Galaxy using the login command. 
+ +:: + + ansible-galaxy delete github_user github_repo + +This only removes the role from Galaxy. It does not impact the actual GitHub repo. + +.. note:: + + The delete command is only available in Ansible 2.0. + +Setup Travis Integerations +-------------------------- + +Using the setup command you can enable notifications from `travis `_. The setup command expects that the user previously authenticated with Galaxy using the login command. + +:: + + $ ansible-galaxy setup travis github_user github_repo xxxtravistokenxxx + + Added integration for travis chouseknecht/ansible-role-sendmail + +The setup command requires your Travis token. The Travis token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described in `the Travis documentation `_. The calculated hash is stored in Galaxy and used to verify notifications received from Travis. + +The setup command enables Galaxy to respond to notifications. Follow the `Travis getting started guide `_ to enable the Travis build process for the role repository. + +When you create your .travis.yml file add the following to cause Travis to notify Galaxy when a build completes: + +:: + + notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ + +.. note:: + + The setup command is only available in Ansible 2.0. + + +List Travis Integrtions +======================= + +Use the --list option to display your Travis integrations: + +:: + + $ ansible-galaxy setup --list + + + ID Source Repo + ---------- ---------- ---------- + 2 travis github_user/github_repo + 1 travis github_user/github_repo + + +Remove Travis Integrations +========================== + +Use the --remove option to disable a Travis integration: + +:: + + $ ansible-galaxy setup --remove ID + +Provide the ID of the integration you want disabled. Use the --list option to get the ID. 
+ + + + + + + + + + + + + + + + + + diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 94c04614ac..01e0475b24 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -22,10 +22,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import os import os.path import sys import yaml +import json +import time from collections import defaultdict from jinja2 import Environment @@ -36,7 +37,10 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.galaxy import Galaxy from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.role import GalaxyRole +from ansible.galaxy.login import GalaxyLogin +from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement +from ansible.module_utils.urls import open_url try: from __main__ import display @@ -44,18 +48,52 @@ except ImportError: from ansible.utils.display import Display display = Display() - class GalaxyCLI(CLI): - VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search") + available_commands = { + "delete": "remove a role from Galaxy", + "import": "add a role contained in a GitHub repo to Galaxy", + "info": "display details about a particular role", + "init": "create a role directory structure in your roles path", + "install": "download a role into your roles path", + "list": "enumerate roles found in your roles path", + "login": "authenticate with Galaxy API and store the token", + "remove": "delete a role from your roles path", + "search": "query the Galaxy API", + "setup": "add a TravisCI integration to Galaxy", + } + SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + def __init__(self, args): - + self.VALID_ACTIONS = self.available_commands.keys() + self.VALID_ACTIONS.sort() self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) + def set_action(self): + 
""" + Get the action the user wants to execute from the sys argv list. + """ + for i in range(0,len(self.args)): + arg = self.args[i] + if arg in self.VALID_ACTIONS: + self.action = arg + del self.args[i] + break + + if not self.action: + self.show_available_actions() + + def show_available_actions(self): + # list available commands + display.display(u'\n' + "usage: ansible-galaxy COMMAND [--help] [options] ...") + display.display(u'\n' + "availabe commands:" + u'\n\n') + for key in self.VALID_ACTIONS: + display.display(u'\t' + "%-12s %s" % (key, self.available_commands[key])) + display.display(' ') + def parse(self): ''' create an options parser for bin/ansible ''' @@ -63,11 +101,21 @@ class GalaxyCLI(CLI): usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - + self.set_action() # options specific to actions - if self.action == "info": + if self.action == "delete": + self.parser.set_usage("usage: %prog delete [options] github_user github_repo") + elif self.action == "import": + self.parser.set_usage("usage: %prog import [options] github_user github_repo") + self.parser.add_option('-n', '--no-wait', dest='wait', action='store_false', default=True, + help='Don\'t wait for import results.') + self.parser.add_option('-b', '--branch', dest='reference', + help='The name of a branch to import. 
Defaults to the repository\'s default branch (usually master)') + self.parser.add_option('-t', '--status', dest='check_status', action='store_true', default=False, + help='Check the status of the most recent import request for given github_user/github_repo.') + elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") elif self.action == "init": self.parser.set_usage("usage: %prog init [options] role_name") @@ -83,27 +131,40 @@ class GalaxyCLI(CLI): self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') + help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": self.parser.set_usage("usage: %prog list [role_name]") + elif self.action == "login": + self.parser.set_usage("usage: %prog login [options]") + self.parser.add_option('-g','--github-token', dest='token', default=None, + help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by') self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by') - self.parser.set_usage("usage: %prog search [] [--galaxy-tags ] [--platforms platform]") + self.parser.add_option('--author', dest='author', + help='GitHub username') + self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") + elif self.action == "setup": + self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret" + + u'\n\n' + "Create an integration with travis.") + self.parser.add_option('-r', 
'--remove', dest='remove_id', default=None, + help='Remove the integration matching the provided ID value. Use --list to see ID values.') + self.parser.add_option('-l', '--list', dest="setup_list", action='store_true', default=False, + help='List all of your integrations.') # options that apply to more than one action - if self.action != "init": + if not self.action in ("config","import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. ' 'The default is the roles_path configured in your ' 'ansible.cfg file (/etc/ansible/roles if not configured)') - if self.action in ("info","init","install","search"): - self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com", + if self.action in ("import","info","init","install","login","search","setup","delete"): + self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True, help='Ignore SSL certificate validation errors.') @@ -112,23 +173,25 @@ class GalaxyCLI(CLI): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') - # get options, args and galaxy object - self.options, self.args =self.parser.parse_args(self.args[1:]) - display.verbosity = self.options.verbosity - self.galaxy = Galaxy(self.options) + if self.action: + # get options, args and galaxy object + self.options, self.args =self.parser.parse_args() + display.verbosity = self.options.verbosity + self.galaxy = Galaxy(self.options) return True def run(self): + if not self.action: + return True + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api - if self.action in ("info","install", "search") or (self.action == 'init' and not 
self.options.offline): - api_server = self.options.api_server - self.api = GalaxyAPI(self.galaxy, api_server) - if not self.api: - raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server) + if self.action in ("import","info","install","search","login","setup","delete") or \ + (self.action == 'init' and not self.options.offline): + self.api = GalaxyAPI(self.galaxy) self.execute() @@ -188,7 +251,7 @@ class GalaxyCLI(CLI): "however it will reset any main.yml files that may have\n" "been modified there already." % role_path) - # create the default README.md + # create default README.md if not os.path.exists(role_path): os.makedirs(role_path) readme_path = os.path.join(role_path, "README.md") @@ -196,9 +259,16 @@ class GalaxyCLI(CLI): f.write(self.galaxy.default_readme) f.close() + # create default .travis.yml + travis = Environment().from_string(self.galaxy.default_travis).render() + f = open(os.path.join(role_path, '.travis.yml'), 'w') + f.write(travis) + f.close() + for dir in GalaxyRole.ROLE_DIRS: dir_path = os.path.join(init_path, role_name, dir) main_yml_path = os.path.join(dir_path, 'main.yml') + # create the directory if it doesn't exist already if not os.path.exists(dir_path): os.makedirs(dir_path) @@ -234,6 +304,20 @@ class GalaxyCLI(CLI): f.write(rendered_meta) f.close() pass + elif dir == "tests": + # create tests/test.yml + inject = dict( + role_name = role_name + ) + playbook = Environment().from_string(self.galaxy.default_test).render(inject) + f = open(os.path.join(dir_path, 'test.yml'), 'w') + f.write(playbook) + f.close() + + # create tests/inventory + f = open(os.path.join(dir_path, 'inventory'), 'w') + f.write('localhost') + f.close() elif dir not in ('files','templates'): # just write a (mostly) empty YAML file for main.yml f = open(main_yml_path, 'w') @@ -325,7 +409,7 @@ class GalaxyCLI(CLI): for role in required_roles: role = RoleRequirement.role_yaml_parse(role) - display.debug('found role %s in yaml file' 
% str(role)) + display.vvv('found role %s in yaml file' % str(role)) if 'name' not in role and 'scm' not in role: raise AnsibleError("Must specify name or src for role") roles_left.append(GalaxyRole(self.galaxy, **role)) @@ -348,7 +432,7 @@ class GalaxyCLI(CLI): roles_left.append(GalaxyRole(self.galaxy, rname.strip())) for role in roles_left: - display.debug('Installing role %s ' % role.name) + display.vvv('Installing role %s ' % role.name) # query the galaxy API for the role data if role.install_info is not None and not force: @@ -458,21 +542,189 @@ class GalaxyCLI(CLI): return 0 def execute_search(self): - + page_size = 1000 search = None - if len(self.args) > 1: - raise AnsibleOptionsError("At most a single search term is allowed.") - elif len(self.args) == 1: - search = self.args.pop() + + if len(self.args): + terms = [] + for i in range(len(self.args)): + terms.append(self.args.pop()) + search = '+'.join(terms) - response = self.api.search_roles(search, self.options.platforms, self.options.tags) + if not search and not self.options.platforms and not self.options.tags and not self.options.author: + raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") - if 'count' in response: - display.display("Found %d roles matching your search:\n" % response['count']) + response = self.api.search_roles(search, platforms=self.options.platforms, + tags=self.options.tags, author=self.options.author, page_size=page_size) + + if response['count'] == 0: + display.display("No roles match your search.", color="yellow") + return True data = '' - if 'results' in response: - for role in response['results']: - data += self._display_role_info(role) + if response['count'] > page_size: + data += ("Found %d roles matching your search. 
Showing first %s.\n" % (response['count'], page_size)) + else: + data += ("Found %d roles matching your search:\n" % response['count']) + + max_len = [] + for role in response['results']: + max_len.append(len(role['username'] + '.' + role['name'])) + name_len = max(max_len) + format_str = " %%-%ds %%s\n" % name_len + data +='\n' + data += (format_str % ("Name", "Description")) + data += (format_str % ("----", "-----------")) + for role in response['results']: + data += (format_str % (role['username'] + '.' + role['name'],role['description'])) + self.pager(data) + + return True + + def execute_login(self): + """ + Verify user's identify via Github and retreive an auth token from Galaxy. + """ + # Authenticate with github and retrieve a token + if self.options.token is None: + login = GalaxyLogin(self.galaxy) + github_token = login.create_github_token() + else: + github_token = self.options.token + + galaxy_response = self.api.authenticate(github_token) + + if self.options.token is None: + # Remove the token we created + login.remove_github_token() + + # Store the Galaxy token + token = GalaxyToken() + token.set(galaxy_response['token']) + + display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username']) + return 0 + + def execute_import(self): + """ + Import a role into Galaxy + """ + + colors = { + 'INFO': 'normal', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'SUCCESS': 'green', + 'FAILED': 'red' + } + + if len(self.args) < 2: + raise AnsibleError("Expected a github_username and github_repository. 
Use --help.") + + github_repo = self.args.pop() + github_user = self.args.pop() + + if self.options.check_status: + task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) + else: + # Submit an import request + task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) + + if len(task) > 1: + # found multiple roles associated with github_user/github_repo + display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo), + color='yellow') + display.display("The following Galaxy roles are being updated:" + u'\n', color='yellow') + for t in task: + display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color='yellow') + display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), + color='yellow') + return 0 + # found a single role as expected + display.display("Successfully submitted import request %d" % task[0]['id']) + if not self.options.wait: + display.display("Role name: %s" % task[0]['summary_fields']['role']['name']) + display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo'])) + + if self.options.check_status or self.options.wait: + # Get the status of the import + msg_list = [] + finished = False + while not finished: + task = self.api.get_import_task(task_id=task[0]['id']) + for msg in task[0]['summary_fields']['task_messages']: + if msg['id'] not in msg_list: + display.display(msg['message_text'], color=colors[msg['message_type']]) + msg_list.append(msg['id']) + if task[0]['state'] in ['SUCCESS', 'FAILED']: + finished = True + else: + time.sleep(10) + + return 0 + + def execute_setup(self): + """ + Setup an integration from Github or Travis + """ + + if self.options.setup_list: + # List existing integration secrets + secrets = self.api.list_secrets() + if len(secrets) == 0: + # None found + 
display.display("No integrations found.") + return 0 + display.display(u'\n' + "ID Source Repo", color="green") + display.display("---------- ---------- ----------", color="green") + for secret in secrets: + display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], + secret['github_repo']),color="green") + return 0 + + if self.options.remove_id: + # Remove a secret + self.api.remove_secret(self.options.remove_id) + display.display("Secret removed. Integrations using this secret will not longer work.", color="green") + return 0 + + if len(self.args) < 4: + raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") + return 0 + + secret = self.args.pop() + github_repo = self.args.pop() + github_user = self.args.pop() + source = self.args.pop() + + resp = self.api.add_secret(source, github_user, github_repo, secret) + display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo'])) + + return 0 + + def execute_delete(self): + """ + Delete a role from galaxy.ansible.com + """ + + if len(self.args) < 2: + raise AnsibleError("Missing one or more arguments. 
Expected: github_user github_repo") + + github_repo = self.args.pop() + github_user = self.args.pop() + resp = self.api.delete_role(github_user, github_repo) + + if len(resp['deleted_roles']) > 1: + display.display("Deleted the following roles:") + display.display("ID User Name") + display.display("------ --------------- ----------") + for role in resp['deleted_roles']: + display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) + + display.display(resp['status']) + + return True + + diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 0f809db729..ae10c5e9a4 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -255,7 +255,8 @@ ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_k PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) # galaxy related -DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') +GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com') +GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True) # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True) diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py index 00d8c25aec..62823fced4 100644 --- a/lib/ansible/galaxy/__init__.py +++ b/lib/ansible/galaxy/__init__.py @@ -52,6 +52,8 @@ class Galaxy(object): #TODO: move to getter for lazy loading self.default_readme = self._str_from_data_file('readme') self.default_meta = self._str_from_data_file('metadata_template.j2') + self.default_test = self._str_from_data_file('test_playbook.j2') + self.default_travis = self._str_from_data_file('travis.j2') def add_role(self, role): self.roles[role.name] = role diff --git 
a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index 2918688406..c1bf2c4ed5 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -25,11 +25,15 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json +import urllib + from urllib2 import quote as urlquote, HTTPError from urlparse import urlparse +import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url +from ansible.galaxy.token import GalaxyToken try: from __main__ import display @@ -43,45 +47,113 @@ class GalaxyAPI(object): SUPPORTED_VERSIONS = ['v1'] - def __init__(self, galaxy, api_server): + def __init__(self, galaxy): self.galaxy = galaxy + self.token = GalaxyToken() + self._api_server = C.GALAXY_SERVER + self._validate_certs = C.GALAXY_IGNORE_CERTS - try: - urlparse(api_server, scheme='https') - except: - raise AnsibleError("Invalid server API url passed: %s" % api_server) + # set validate_certs + if galaxy.options.validate_certs == False: + self._validate_certs = False + display.vvv('Check for valid certs: %s' % self._validate_certs) - server_version = self.get_server_api_version('%s/api/' % (api_server)) - if not server_version: - raise AnsibleError("Could not retrieve server API version: %s" % api_server) + # set the API server + if galaxy.options.api_server != C.GALAXY_SERVER: + self._api_server = galaxy.options.api_server + display.vvv("Connecting to galaxy_server: %s" % self._api_server) + server_version = self.get_server_api_version() + if server_version in self.SUPPORTED_VERSIONS: - self.baseurl = '%s/api/%s' % (api_server, server_version) + self.baseurl = '%s/api/%s' % (self._api_server, server_version) self.version = server_version # for future use - display.vvvvv("Base API: %s" % self.baseurl) + display.vvv("Base API: %s" % self.baseurl) else: raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version) - def get_server_api_version(self, 
api_server): + def __auth_header(self): + token = self.token.get() + if token is None: + raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.") + return {'Authorization': 'Token ' + token} + + def __call_galaxy(self, url, args=None, headers=None, method=None): + if args and not headers: + headers = self.__auth_header() + try: + display.vvv(url) + resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method) + data = json.load(resp) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['detail']) + return data + + @property + def api_server(self): + return self._api_server + + @property + def validate_certs(self): + return self._validate_certs + + def get_server_api_version(self): """ Fetches the Galaxy API current version to ensure the API server is up and reachable. """ - #TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1) - # also should set baseurl using supported_versions which has path - return 'v1' - try: - data = json.load(open_url(api_server, validate_certs=self.galaxy.options.validate_certs)) - return data.get("current_version", 'v1') - except Exception: - # TODO: report error - return None + url = '%s/api/' % self._api_server + data = json.load(open_url(url, validate_certs=self._validate_certs)) + return data['current_version'] + except Exception as e: + raise AnsibleError("The API server (%s) is not responding, please try again later." 
% url) + + def authenticate(self, github_token): + """ + Retrieve an authentication token + """ + url = '%s/tokens/' % self.baseurl + args = urllib.urlencode({"github_token": github_token}) + resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST") + data = json.load(resp) + return data + def create_import_task(self, github_user, github_repo, reference=None): + """ + Post an import request + """ + url = '%s/imports/' % self.baseurl + args = urllib.urlencode({ + "github_user": github_user, + "github_repo": github_repo, + "github_reference": reference if reference else "" + }) + data = self.__call_galaxy(url, args=args) + if data.get('results', None): + return data['results'] + return data + + def get_import_task(self, task_id=None, github_user=None, github_repo=None): + """ + Check the status of an import task. + """ + url = '%s/imports/' % self.baseurl + if not task_id is None: + url = "%s?id=%d" % (url,task_id) + elif not github_user is None and not github_repo is None: + url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo) + else: + raise AnsibleError("Expected task_id or github_user and github_repo") + + data = self.__call_galaxy(url) + return data['results'] + def lookup_role_by_name(self, role_name, notify=True): """ - Find a role by name + Find a role by name. """ role_name = urlquote(role_name) @@ -92,18 +164,12 @@ class GalaxyAPI(object): if notify: display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) except: - raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name) + raise AnsibleError("Invalid role name (%s). 
Specify role as format: username.rolename" % role_name) url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name) - display.vvvv("- %s" % (url)) - try: - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) - if len(data["results"]) != 0: - return data["results"][0] - except: - # TODO: report on connection/availability errors - pass - + data = self.__call_galaxy(url) + if len(data["results"]) != 0: + return data["results"][0] return None def fetch_role_related(self, related, role_id): @@ -114,13 +180,12 @@ class GalaxyAPI(object): try: url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results = data['results'] done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) - display.display(url) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results @@ -131,10 +196,9 @@ class GalaxyAPI(object): """ Fetch the list of items specified. 
""" - try: url = '%s/%s/?page_size' % (self.baseurl, what) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) if "results" in data: results = data['results'] else: @@ -144,41 +208,64 @@ class GalaxyAPI(object): done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) - display.display(url) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results except Exception as error: raise AnsibleError("Failed to download the %s list: %s" % (what, str(error))) - def search_roles(self, search, platforms=None, tags=None): + def search_roles(self, search, **kwargs): - search_url = self.baseurl + '/roles/?page=1' + search_url = self.baseurl + '/search/roles/?' if search: - search_url += '&search=' + urlquote(search) + search_url += '&autocomplete=' + urlquote(search) - if tags is None: - tags = [] - elif isinstance(tags, basestring): + tags = kwargs.get('tags',None) + platforms = kwargs.get('platforms', None) + page_size = kwargs.get('page_size', None) + author = kwargs.get('author', None) + + if tags and isinstance(tags, basestring): tags = tags.split(',') - - for tag in tags: - search_url += '&chain__tags__name=' + urlquote(tag) - - if platforms is None: - platforms = [] - elif isinstance(platforms, basestring): + search_url += '&tags_autocomplete=' + '+'.join(tags) + + if platforms and isinstance(platforms, basestring): platforms = platforms.split(',') + search_url += '&platforms_autocomplete=' + '+'.join(platforms) - for plat in platforms: - search_url += '&chain__platforms__name=' + urlquote(plat) - - display.debug("Executing query: %s" % search_url) - try: - data = json.load(open_url(search_url, validate_certs=self.galaxy.options.validate_certs)) - except HTTPError as e: - raise AnsibleError("Unsuccessful request to server: 
%s" % str(e)) + if page_size: + search_url += '&page_size=%s' % page_size + if author: + search_url += '&username_autocomplete=%s' % author + + data = self.__call_galaxy(search_url) + return data + + def add_secret(self, source, github_user, github_repo, secret): + url = "%s/notification_secrets/" % self.baseurl + args = urllib.urlencode({ + "source": source, + "github_user": github_user, + "github_repo": github_repo, + "secret": secret + }) + data = self.__call_galaxy(url, args=args) + return data + + def list_secrets(self): + url = "%s/notification_secrets" % self.baseurl + data = self.__call_galaxy(url, headers=self.__auth_header()) + return data + + def remove_secret(self, secret_id): + url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id) + data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') + return data + + def delete_role(self, github_user, github_repo): + url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo) + data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') return data diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2 index c618adb3d4..1054c64bdf 100644 --- a/lib/ansible/galaxy/data/metadata_template.j2 +++ b/lib/ansible/galaxy/data/metadata_template.j2 @@ -2,9 +2,11 @@ galaxy_info: author: {{ author }} description: {{description}} company: {{ company }} + # If the issue tracker for your role is not on github, uncomment the # next line and provide a value # issue_tracker_url: {{ issue_tracker_url }} + # Some suggested licenses: # - BSD (default) # - MIT @@ -13,7 +15,17 @@ galaxy_info: # - Apache # - CC-BY license: {{ license }} + min_ansible_version: {{ min_ansible_version }} + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. 
During import Galaxy will access files on + # this branch. If travis integration is cofigured, only notification for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + # # Below are all platforms currently available. Just uncomment # the ones that apply to your role. If you don't see your @@ -28,6 +40,7 @@ galaxy_info: # - {{ version }} {%- endfor %} {%- endfor %} + galaxy_tags: [] # List tags for your role here, one per line. A tag is # a keyword that describes and categorizes the role. @@ -36,6 +49,7 @@ galaxy_info: # # NOTE: A tag is limited to a single word comprised of # alphanumeric characters. Maximum 20 tags per role. + dependencies: [] # List your role dependencies here, one per line. # Be sure to remove the '[]' above if you add dependencies diff --git a/lib/ansible/galaxy/data/test_playbook.j2 b/lib/ansible/galaxy/data/test_playbook.j2 new file mode 100644 index 0000000000..45824f6051 --- /dev/null +++ b/lib/ansible/galaxy/data/test_playbook.j2 @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - {{ role_name }} \ No newline at end of file diff --git a/lib/ansible/galaxy/data/travis.j2 b/lib/ansible/galaxy/data/travis.j2 new file mode 100644 index 0000000000..36bbf6208c --- /dev/null +++ b/lib/ansible/galaxy/data/travis.j2 @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git 
a/lib/ansible/galaxy/login.py b/lib/ansible/galaxy/login.py new file mode 100644 index 0000000000..3edaed7bc7 --- /dev/null +++ b/lib/ansible/galaxy/login.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2015, Chris Houseknecht +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +######################################################################## + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import getpass +import json +import urllib + +from urllib2 import quote as urlquote, HTTPError +from urlparse import urlparse + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.module_utils.urls import open_url +from ansible.utils.color import stringc + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + +class GalaxyLogin(object): + ''' Class to handle authenticating user with Galaxy API prior to performing CUD operations ''' + + GITHUB_AUTH = 'https://api.github.com/authorizations' + + def __init__(self, galaxy, github_token=None): + self.galaxy = galaxy + self.github_username = None + self.github_password = None + + if github_token == None: + self.get_credentials() + + def get_credentials(self): + display.display(u'\n\n' + "We need your " + stringc("Github 
login",'bright cyan') + + " to identify you.", screen_only=True) + display.display("This information will " + stringc("not be sent to Galaxy",'bright cyan') + + ", only to " + stringc("api.github.com.","yellow"), screen_only=True) + display.display("The password will not be displayed." + u'\n\n', screen_only=True) + display.display("Use " + stringc("--github-token",'yellow') + + " if you do not want to enter your password." + u'\n\n', screen_only=True) + + try: + self.github_username = raw_input("Github Username: ") + except: + pass + + try: + self.github_password = getpass.getpass("Password for %s: " % self.github_username) + except: + pass + + if not self.github_username or not self.github_password: + raise AnsibleError("Invalid Github credentials. Username and password are required.") + + def remove_github_token(self): + ''' + If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot + retrieve the token after creation, so we are forced to create a new one. 
+ ''' + try: + tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, + url_password=self.github_password, force_basic_auth=True,)) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + + for token in tokens: + if token['note'] == 'ansible-galaxy login': + display.vvvvv('removing token: %s' % token['token_last_eight']) + try: + open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username, + url_password=self.github_password, method='DELETE', force_basic_auth=True,) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + + def create_github_token(self): + ''' + Create a personal authorization token with a note of 'ansible-galaxy login' + ''' + self.remove_github_token() + args = json.dumps({"scopes":["public_repo"], "note":"ansible-galaxy login"}) + try: + data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, + url_password=self.github_password, force_basic_auth=True, data=args)) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + return data['token'] diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py index dc9da5d79c..36b1e0fbbb 100644 --- a/lib/ansible/galaxy/role.py +++ b/lib/ansible/galaxy/role.py @@ -46,7 +46,7 @@ class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = os.path.join('meta', 'main.yml') META_INSTALL = os.path.join('meta', '.galaxy_install_info') - ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') + ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests') def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None): @@ -198,10 +198,10 @@ class GalaxyRole(object): role_data = self.src tmp_file = self.fetch(role_data) else: - api = GalaxyAPI(self.galaxy, self.options.api_server) + api = GalaxyAPI(self.galaxy) role_data = api.lookup_role_by_name(self.src) if not 
role_data: - raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.options.api_server)) + raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server)) role_versions = api.fetch_role_related('versions', role_data['id']) if not self.version: @@ -213,8 +213,10 @@ class GalaxyRole(object): loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] loose_versions.sort() self.version = str(loose_versions[-1]) + elif role_data.get('github_branch', None): + self.version = role_data['github_branch'] else: - self.version = 'master' + self.version = 'master' elif self.version != 'master': if role_versions and self.version not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions)) diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py new file mode 100644 index 0000000000..02ca833069 --- /dev/null +++ b/lib/ansible/galaxy/token.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2015, Chris Houseknecht +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +######################################################################## +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import yaml +from stat import * + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + + +class GalaxyToken(object): + ''' Class to storing and retrieving token in ~/.ansible_galaxy ''' + + def __init__(self): + self.file = os.path.expanduser("~") + '/.ansible_galaxy' + self.config = yaml.safe_load(self.__open_config_for_read()) + if not self.config: + self.config = {} + + def __open_config_for_read(self): + if os.path.isfile(self.file): + display.vvv('Opened %s' % self.file) + return open(self.file, 'r') + # config.yml not found, create and chomd u+rw + f = open(self.file,'w') + f.close() + os.chmod(self.file,S_IRUSR|S_IWUSR) # owner has +rw + display.vvv('Created %s' % self.file) + return open(self.file, 'r') + + def set(self, token): + self.config['token'] = token + self.save() + + def get(self): + return self.config.get('token', None) + + def save(self): + with open(self.file,'w') as f: + yaml.safe_dump(self.config,f,default_flow_style=False) + \ No newline at end of file From 04fc3f118f5989df4c2ba462d86a75d0b72fc50a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 08:23:45 -0800 Subject: [PATCH 210/590] Code smell test for specifying both required and default in FieldAttributes --- .travis.yml | 1 + test/code-smell/required-and-default-attributes.sh | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100755 test/code-smell/required-and-default-attributes.sh diff --git a/.travis.yml b/.travis.yml index 1ff0ca118d..603132f722 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,7 @@ script: - ./test/code-smell/replace-urlopen.sh . 
- ./test/code-smell/use-compat-six.sh lib - ./test/code-smell/boilerplate.sh +- ./test/code-smell/required-and-default-attributes.sh - if test x"$TOXENV" != x'py24' ; then tox ; fi - if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi #- make -C docsite all diff --git a/test/code-smell/required-and-default-attributes.sh b/test/code-smell/required-and-default-attributes.sh new file mode 100755 index 0000000000..9822a15597 --- /dev/null +++ b/test/code-smell/required-and-default-attributes.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +BASEDIR=${1-"lib/ansible"} +cd "$BASEDIR" +grep -r FieldAttribute . |grep 'default' | grep 'required' +if test $? -eq 0 ; then + exit 1 +fi +exit 0 + From c64298de02a9998d6c5774ccb1f92a9aec435d74 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:22:58 -0800 Subject: [PATCH 211/590] Revert "avoid persistent containers in attribute defaults" This reverts commit 87969868d42cd8aba1c65c8207d059d73407373b. 
found better way to do it --- lib/ansible/playbook/attribute.py | 11 ----------- lib/ansible/playbook/block.py | 6 +++--- lib/ansible/playbook/conditional.py | 2 +- lib/ansible/playbook/play.py | 16 ++++++++-------- lib/ansible/playbook/play_context.py | 4 ++-- lib/ansible/playbook/playbook_include.py | 2 +- lib/ansible/playbook/role/metadata.py | 2 +- lib/ansible/playbook/taggable.py | 2 +- lib/ansible/playbook/task.py | 2 +- 9 files changed, 18 insertions(+), 29 deletions(-) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index ce7ed6d8fe..703d9dbca1 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -32,17 +32,6 @@ class Attribute: self.priority = priority self.always_post_validate = always_post_validate - # This is here to avoid `default=` unwanted persistence across object instances - # We cannot rely on None as some fields use it to skip the code - # that would detect an empty container as a user error - if self.default == '_ansible_container': - if self.isa == 'list': - self.default = [] - elif self.isa == 'dict': - self.default = {} - elif self.isa == 'set': - self.default = set() - def __eq__(self, other): return other.priority == self.priority diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 66009b028a..f2d9c82833 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -30,9 +30,9 @@ from ansible.playbook.taggable import Taggable class Block(Base, Become, Conditional, Taggable): - _block = FieldAttribute(isa='list', default='_ansible_container') - _rescue = FieldAttribute(isa='list', default='_ansible_container') - _always = FieldAttribute(isa='list', default='_ansible_container') + _block = FieldAttribute(isa='list', default=[]) + _rescue = FieldAttribute(isa='list', default=[]) + _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') _delegate_facts = FieldAttribute(isa='bool', 
default=False) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index a5b3ca725f..fc178e2fa1 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -33,7 +33,7 @@ class Conditional: to be run conditionally when a condition is met or skipped. ''' - _when = FieldAttribute(isa='list', default='_ansible_container') + _when = FieldAttribute(isa='list', default=[]) def __init__(self, loader=None): # when used directly, this class needs a loader, but we want to diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index e08c8c6001..ed61416e95 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,22 +64,22 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default='_ansible_container', required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes - _vars_files = FieldAttribute(isa='list', default='_ansible_container', priority=99) - _vars_prompt = FieldAttribute(isa='list', default='_ansible_container', always_post_validate=True) + _vars_files = FieldAttribute(isa='list', default=[], priority=99) + _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True) _vault_password = FieldAttribute(isa='string', always_post_validate=True) # Role Attributes - _roles = FieldAttribute(isa='list', default='_ansible_container', priority=90) + _roles = FieldAttribute(isa='list', default=[], priority=90) # Block (Task) Lists Attributes - _handlers = FieldAttribute(isa='list', default='_ansible_container') - _pre_tasks = FieldAttribute(isa='list', default='_ansible_container') - _post_tasks = 
FieldAttribute(isa='list', default='_ansible_container') - _tasks = FieldAttribute(isa='list', default='_ansible_container') + _handlers = FieldAttribute(isa='list', default=[]) + _pre_tasks = FieldAttribute(isa='list', default=[]) + _post_tasks = FieldAttribute(isa='list', default=[]) + _tasks = FieldAttribute(isa='list', default=[]) # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False, always_post_validate=True) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index da291c3c83..81223500ad 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -171,8 +171,8 @@ class PlayContext(Base): # general flags _verbosity = FieldAttribute(isa='int', default=0) - _only_tags = FieldAttribute(isa='set', default='_ansible_container') - _skip_tags = FieldAttribute(isa='set', default='_ansible_container') + _only_tags = FieldAttribute(isa='set', default=set()) + _skip_tags = FieldAttribute(isa='set', default=set()) _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index 52081c4153..d9af2ba523 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -35,7 +35,7 @@ class PlaybookInclude(Base, Conditional, Taggable): _name = FieldAttribute(isa='string') _include = FieldAttribute(isa='string') - _vars = FieldAttribute(isa='dict', default='_ansible_container') + _vars = FieldAttribute(isa='dict', default=dict()) @staticmethod def load(data, basedir, variable_manager=None, loader=None): diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py index 4bb7d0ce02..58b59145a1 100644 --- a/lib/ansible/playbook/role/metadata.py +++ b/lib/ansible/playbook/role/metadata.py @@ -40,7 +40,7 @@ class 
RoleMetadata(Base): ''' _allow_duplicates = FieldAttribute(isa='bool', default=False) - _dependencies = FieldAttribute(isa='list', default='_ansible_container') + _dependencies = FieldAttribute(isa='list', default=[]) _galaxy_info = FieldAttribute(isa='GalaxyInfo') def __init__(self, owner=None): diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 37e3261e80..8f5cfa0934 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -29,7 +29,7 @@ from ansible.template import Templar class Taggable: untagged = frozenset(['untagged']) - _tags = FieldAttribute(isa='list', default='_ansible_container', listof=(string_types,int)) + _tags = FieldAttribute(isa='list', default=[], listof=(string_types,int)) def __init__(self): super(Taggable, self).__init__() diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 53a9a3c393..17f1952e39 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -64,7 +64,7 @@ class Task(Base, Conditional, Taggable, Become): # will be used if defined # might be possible to define others - _args = FieldAttribute(isa='dict', default='_ansible_container') + _args = FieldAttribute(isa='dict', default=dict()) _action = FieldAttribute(isa='string') _any_errors_fatal = FieldAttribute(isa='bool') From 2820b4c243d50416f661c4ea9408bba1918244bb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:23:45 -0800 Subject: [PATCH 212/590] removed default from hosts to make it requried prevents writing a play w/o a hosts entry which would default to all/empty --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ed61416e95..bc03314864 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,7 +64,7 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, 
always_post_validate=True) - _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes From 2bfb13bfb39bf31c5c1bc40f376907fc50ca69ef Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:28:54 -0800 Subject: [PATCH 213/590] removed unused 'pattern' from ansible.cfg also moved the config param to a 'deprecated' list in constants.py added TODO for producing a deprecation warning for such vars --- examples/ansible.cfg | 1 - lib/ansible/constants.py | 8 ++++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 87c089f45a..ec3ddf2064 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -14,7 +14,6 @@ #inventory = /etc/ansible/hosts #library = /usr/share/my_modules/ #remote_tmp = $HOME/.ansible/tmp -#pattern = * #forks = 5 #poll_interval = 15 #sudo_user = root diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index ae10c5e9a4..7f74358dd5 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -120,16 +120,20 @@ DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'd # sections in config file DEFAULTS='defaults' +# FIXME: add deprecation warning when these get set +#### DEPRECATED VARS #### +# use more sanely named 'inventory' DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True) +# this is not used since 0.5 but people might still have in config +DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None) -# generally configurable things +#### GENERALLY CONFIGURABLE THINGS #### DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 
'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True) DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') -DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8')) From ae2447df9136353453c9ed48d44b2c7fa70231b0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:38:53 -0800 Subject: [PATCH 214/590] attribute defaults that are containers are a copy This is simpler way to prevent persistent containers across instances of classes that use field attributes --- lib/ansible/playbook/attribute.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index 703d9dbca1..0befb9d80d 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from copy import deepcopy class Attribute: @@ -32,6 +33,11 @@ class Attribute: self.priority = priority self.always_post_validate = always_post_validate + if default is not None and self.isa in ('list', 'dict', 'set'): + self.default = deepcopy(default) + else: + self.default = default + def __eq__(self, other): return other.priority == self.priority From 0211da2fe9a7b3cefa79d72aab599546bf923e1b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 08:44:09 -0800 Subject: [PATCH 
215/590] Clarify language of delegate_facts documentation --- docsite/rst/playbooks_delegation.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index 4e2e8c372a..c715adea36 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -137,8 +137,8 @@ Delegated facts .. versionadded:: 2.0 -Before 2.0 any facts gathered by a delegated task were assigned to the `inventory_hostname` (current host) instead of the host which actually produced the facts (delegated to host). -The new directive `delegate_facts` if set to `True` will assing the task's gathered facts to the delegated host instead of the current one.:: +By default, any fact gathered by a delegated task are assigned to the `inventory_hostname` (the current host) instead of the host which actually produced the facts (the delegated to host). +In 2.0, the directive `delegate_facts` may be set to `True` to assign the task's gathered facts to the delegated host instead of the current one.:: - hosts: app_servers @@ -149,8 +149,8 @@ The new directive `delegate_facts` if set to `True` will assing the task's gathe delegate_facts: True with_items: "{{groups['dbservers'}}" -The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers, -that way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. +The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers. +This way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. .. 
_run_once: From 7936a4687e9be3752bdbee006d956ed4f2687160 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 10:01:21 -0800 Subject: [PATCH 216/590] adhoc avoids callbacks by default as it did before Previous emptying of whitelist only affected callbacks that were constructed for need whitelist. This now works for all callbacks. --- lib/ansible/cli/adhoc.py | 4 +--- lib/ansible/executor/task_queue_manager.py | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 912b07a5c7..f6dcb37a8a 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,9 +163,6 @@ class AdHocCLI(CLI): else: cb = 'minimal' - if not C.DEFAULT_LOAD_CALLBACK_PLUGINS: - C.DEFAULT_CALLBACK_WHITELIST = [] - if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree @@ -180,6 +177,7 @@ class AdHocCLI(CLI): options=self.options, passwords=passwords, stdout_callback=cb, + run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, ) result = self._tqm.run(play) finally: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index d665000046..70cefee510 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -56,7 +56,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. 
''' - def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None): + def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True): self._inventory = inventory self._variable_manager = variable_manager @@ -65,6 +65,7 @@ class TaskQueueManager: self._stats = AggregateStats() self.passwords = passwords self._stdout_callback = stdout_callback + self._run_additional_callbacks = run_additional_callbacks self._callbacks_loaded = False self._callback_plugins = [] @@ -159,7 +160,7 @@ class TaskQueueManager: if callback_name != self._stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True - elif callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST): + elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)): continue self._callback_plugins.append(callback_plugin()) From 04d74fd6804b5a851cc8762cecf07b100e4dcc6f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 10:13:50 -0800 Subject: [PATCH 217/590] reenabled --tree for ansible adhoc command previous fix to avoid callbacks now conflicted with tree optoin which is implemented as a callback in 2.0 --- lib/ansible/cli/adhoc.py | 3 +++ lib/ansible/executor/task_queue_manager.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index f6dcb37a8a..3de0e55b7b 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,9 +163,11 @@ class AdHocCLI(CLI): else: cb = 'minimal' + run_tree=False if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree + run_tree=True # now create a task queue manager to execute the play self._tqm = None @@ -178,6 +180,7 @@ class AdHocCLI(CLI): passwords=passwords, 
stdout_callback=cb, run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, + run_tree=run_tree, ) result = self._tqm.run(play) finally: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 70cefee510..7411138293 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -56,7 +56,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. ''' - def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True): + def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False): self._inventory = inventory self._variable_manager = variable_manager @@ -66,6 +66,7 @@ class TaskQueueManager: self.passwords = passwords self._stdout_callback = stdout_callback self._run_additional_callbacks = run_additional_callbacks + self._run_tree = run_tree self._callbacks_loaded = False self._callback_plugins = [] @@ -160,6 +161,8 @@ class TaskQueueManager: if callback_name != self._stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True + elif callback_name == 'tree' and self._run_tree: + pass elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)): continue From 14e19c239d610619498f06978e2841764a262e15 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 9 Dec 2015 14:51:43 -0500 Subject: [PATCH 218/590] Make on_file_diff callback item-aware --- lib/ansible/plugins/callback/__init__.py | 6 +++++- lib/ansible/plugins/callback/default.py | 9 ++++++++- lib/ansible/plugins/callback/skippy.py | 9 ++++++++- lib/ansible/plugins/strategy/__init__.py | 2 +- 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py 
b/lib/ansible/plugins/callback/__init__.py index 03eb58d99d..b8a48943f2 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -59,6 +59,10 @@ class CallbackBase: version = getattr(self, 'CALLBACK_VERSION', '1.0') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) + def _copy_result(self, result): + ''' helper for callbacks, so they don't all have to include deepcopy ''' + return deepcopy(result) + def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result")) @@ -126,7 +130,7 @@ class CallbackBase: def _process_items(self, result): for res in result._result['results']: - newres = deepcopy(result) + newres = self._copy_result(result) res['item'] = self._get_item(res) newres._result = res if 'failed' in res and res['failed']: diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 3175bf3e53..1f37f4b975 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -134,7 +134,14 @@ class CallbackModule(CallbackBase): self._display.banner(msg) def v2_on_file_diff(self, result): - if 'diff' in result._result and result._result['diff']: + if result._task.loop and 'results' in result._result: + for res in result._result['results']: + newres = self._copy_result(result) + res['item'] = self._get_item(res) + newres._result = res + + self.v2_on_file_diff(newres) + elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) def v2_playbook_item_on_ok(self, result): diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py index 15b7d3387c..495943417f 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ 
b/lib/ansible/plugins/callback/skippy.py @@ -123,7 +123,14 @@ class CallbackModule(CallbackBase): self._display.banner(msg) def v2_on_file_diff(self, result): - if 'diff' in result._result and result._result['diff']: + if result._task.loop and 'results' in result._result: + for res in result._result['results']: + newres = self._copy_result(result) + res['item'] = self._get_item(res) + newres._result = res + + self.v2_on_file_diff(newres) + elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) def v2_playbook_item_on_ok(self, result): diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 732a9293d2..15636b580d 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -221,7 +221,7 @@ class StrategyBase: self._tqm._stats.increment('changed', host.name) self._tqm.send_callback('v2_runner_on_ok', task_result) - if self._diff and 'diff' in task_result._result: + if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) self._pending_results -= 1 From 61dc4a7e67bcb7c968e273ee39618d1f76f7ab9e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 12:10:21 -0800 Subject: [PATCH 219/590] Update module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 191347676e..0b5555b62c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 191347676eea08817da3fb237f24cdbf2d16e307 +Subproject commit 0b5555b62cd8d91fb4fa434217671f3acaebbf5a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index a10bdd6be9..cbed642009 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit a10bdd6be948d3aa5fad7ff4959908d6e78e0528 +Subproject commit 
cbed642009497ddaf19b5f578ab6c78da1356eda From 64864829c4a858e296b049075675e960de678690 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 12:37:56 -0800 Subject: [PATCH 220/590] changed deprecation to removal warning --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index d7d0f03fb1..3c1331e706 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -388,7 +388,7 @@ class Inventory(object): end = -1 subscript = (int(start), int(end)) if sep == '-': - display.deprecated("Use [x:y] inclusive subscripts instead of [x-y]", version=2.0, removed=True) + display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed") return (pattern, subscript) From 07bf4d9ac4899eb2e0e8246530ff2ca3ee75f3ef Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 9 Dec 2015 15:48:53 -0500 Subject: [PATCH 221/590] added winrm CP notes to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d31ef4ebb..2bf11e6c5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,8 @@ newline being stripped you can change your playbook like this: ###Plugins * Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases +* WinRM connection plugin passes all vars named `ansible_winrm_*` to the underlying pywinrm client. This allows, for instance, `ansible_winrm_server_cert_validation=ignore` to be used with newer versions of pywinrm to disable certificate validation on Python 2.7.9+. +* WinRM connection plugin put_file is significantly faster and no longer has file size limitations. 
####Deprecated Modules (new ones in parens): From c0d79cf7e10da157ae1b28283ab7b564baee7b51 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 13:07:00 -0800 Subject: [PATCH 222/590] Remove the funcd connection plugin --- lib/ansible/plugins/connection/funcd.py | 99 ------------------------- 1 file changed, 99 deletions(-) delete mode 100644 lib/ansible/plugins/connection/funcd.py diff --git a/lib/ansible/plugins/connection/funcd.py b/lib/ansible/plugins/connection/funcd.py deleted file mode 100644 index 4c9e09be65..0000000000 --- a/lib/ansible/plugins/connection/funcd.py +++ /dev/null @@ -1,99 +0,0 @@ -# Based on local.py (c) 2012, Michael DeHaan -# Based on chroot.py (c) 2013, Maykel Moya -# (c) 2013, Michael Scherer -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# --- -# The func transport permit to use ansible over func. For people who have already setup -# func and that wish to play with ansible, this permit to move gradually to ansible -# without having to redo completely the setup of the network. 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -HAVE_FUNC=False -try: - import func.overlord.client as fc - HAVE_FUNC=True -except ImportError: - pass - -import os -from ansible.callbacks import vvv -from ansible import errors -import tempfile -import shutil - - -class Connection(object): - ''' Func-based connections ''' - - def __init__(self, runner, host, port, *args, **kwargs): - self.runner = runner - self.host = host - self.has_pipelining = False - # port is unused, this go on func - self.port = port - - def connect(self, port=None): - if not HAVE_FUNC: - raise errors.AnsibleError("func is not installed") - - self.client = fc.Client(self.host) - return self - - def exec_command(self, cmd, become_user=None, sudoable=False, - executable='/bin/sh', in_data=None): - ''' run a command on the remote minion ''' - - if in_data: - raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - - # totally ignores privlege escalation - vvv("EXEC %s" % (cmd), host=self.host) - p = self.client.command.run(cmd)[self.host] - return (p[0], p[1], p[2]) - - def _normalize_path(self, path, prefix): - if not path.startswith(os.path.sep): - path = os.path.join(os.path.sep, path) - normpath = os.path.normpath(path) - return os.path.join(prefix, normpath[1:]) - - def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' - - out_path = self._normalize_path(out_path, '/') - vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) - self.client.local.copyfile.send(in_path, out_path) - - def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' - - in_path = self._normalize_path(in_path, '/') - vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - # need to use a tmp dir due to difference of semantic for getfile - # ( who take a # directory as destination) and fetch_file, who - # take a file directly - tmpdir = 
tempfile.mkdtemp(prefix="func_ansible") - self.client.local.getfile.get(in_path, tmpdir) - shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), - out_path) - shutil.rmtree(tmpdir) - - def close(self): - ''' terminate the connection; nothing to do here ''' - pass From 18ac12aee60b0033d4b8af4a78ddbd55335c2991 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Wed, 9 Dec 2015 22:08:30 +0100 Subject: [PATCH 223/590] Do not fail when variable is not correct in debug action. See https://github.com/ansible/ansible/issues/13484 for more information. --- lib/ansible/plugins/action/debug.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index 1d8e28c7a4..a0ffb71404 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -40,7 +40,7 @@ class ActionModule(ActionBase): result['msg'] = self._task.args['msg'] # FIXME: move the LOOKUP_REGEX somewhere else elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - results = self._templar.template(self._task.args['var'], convert_bare=True) + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=False) if type(self._task.args['var']) in (list, dict): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results From a7cd41b482dc6bf1bf1073e451aa1b38526dde08 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 9 Dec 2015 16:29:39 -0500 Subject: [PATCH 224/590] Windows doc updates --- docsite/rst/intro_windows.rst | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index e5cbb94faf..1adcc35010 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -31,7 +31,7 @@ On a Linux control machine:: Active Directory Support 
++++++++++++++++++++++++ -If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module and the MIT krb5 libraries it depends on. +If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module on the Ansible control host (and the MIT krb5 libraries it depends on). The Ansible control host also requires a properly configured computer account in Active Directory. Installing python-kerberos dependencies --------------------------------------- @@ -131,7 +131,9 @@ To test this, ping the windows host you want to control by name then use the ip If you get different hostnames back than the name you originally pinged, speak to your active directory administrator and get them to check that DNS Scavenging is enabled and that DNS and DHCP are updating each other. -Check your ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted. +Ensure that the Ansible controller has a properly configured computer account in the domain. + +Check your Ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted. Check you are using the real fully qualified domain name for the domain. Sometimes domains are commonly known to users by aliases. 
To check this run: @@ -165,6 +167,8 @@ In group_vars/windows.yml, define the following inventory variables:: ansible_password: SecretPasswordGoesHere ansible_port: 5986 ansible_connection: winrm + # The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates: + ansible_winrm_server_cert_validation: ignore Although Ansible is mostly an SSH-oriented system, Windows management will not happen over SSH (`yet `). @@ -189,6 +193,7 @@ Since 2.0, the following custom inventory variables are also supported for addit * ``ansible_winrm_path``: Specify an alternate path to the WinRM endpoint. Ansible uses ``/wsman`` by default. * ``ansible_winrm_realm``: Specify the realm to use for Kerberos authentication. If the username contains ``@``, Ansible will use the part of the username after ``@`` by default. * ``ansible_winrm_transport``: Specify one or more transports as a comma-separated list. By default, Ansible will use ``kerberos,plaintext`` if the ``kerberos`` module is installed and a realm is defined, otherwise ``plaintext``. +* ``ansible_winrm_server_cert_validation``: Specify the server certificate validation mode (``ignore`` or ``validate``). Ansible defaults to ``validate`` on Python 2.7.9 and higher, which will result in certificate validation errors against the Windows self-signed certificates. Unless verifiable certificates have been configured on the WinRM listeners, this should be set to ``ignore`` * ``ansible_winrm_*``: Any additional keyword arguments supported by ``winrm.Protocol`` may be provided. .. _windows_system_prep: @@ -221,7 +226,7 @@ Getting to PowerShell 3.0 or higher PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. Note that PowerShell 3.0 is only supported on Windows 7 SP1, Windows Server 2008 SP1, and later releases of Windows. 
-Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. +Looking at an Ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. .. _what_windows_modules_are_available: @@ -248,10 +253,10 @@ Note there are a few other Ansible modules that don't start with "win" that also Developers: Supported modules and how it works `````````````````````````````````````````````` -Developing ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix. -What if you want to write Windows modules for ansible though? +Developing Ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix. +What if you want to write Windows modules for Ansible though? -For Windows, ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. +For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. Windows modules live in a "windows/" subfolder in the Ansible "library/" subtree. For example, if a module is named "library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. 
@@ -351,7 +356,7 @@ form of new modules, tweaks to existing modules, documentation, or something els :doc:`developing_modules` How to write modules :doc:`playbooks` - Learning ansible's configuration management language + Learning Ansible's configuration management language `List of Windows Modules `_ Windows specific module list, all implemented in PowerShell `Mailing List `_ From 62cbc03af6410df2b9c61a5056f71a51dd2570ec Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 13:29:53 -0800 Subject: [PATCH 225/590] Revert "Remove the funcd connection plugin" This reverts commit c0d79cf7e10da157ae1b28283ab7b564baee7b51. We may still port the funcd connection plugin, just not in time for 2.0.0 --- lib/ansible/plugins/connection/funcd.py | 99 +++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 lib/ansible/plugins/connection/funcd.py diff --git a/lib/ansible/plugins/connection/funcd.py b/lib/ansible/plugins/connection/funcd.py new file mode 100644 index 0000000000..4c9e09be65 --- /dev/null +++ b/lib/ansible/plugins/connection/funcd.py @@ -0,0 +1,99 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# (c) 2013, Michael Scherer +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# --- +# The func transport permit to use ansible over func. 
For people who have already setup +# func and that wish to play with ansible, this permit to move gradually to ansible +# without having to redo completely the setup of the network. +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +HAVE_FUNC=False +try: + import func.overlord.client as fc + HAVE_FUNC=True +except ImportError: + pass + +import os +from ansible.callbacks import vvv +from ansible import errors +import tempfile +import shutil + + +class Connection(object): + ''' Func-based connections ''' + + def __init__(self, runner, host, port, *args, **kwargs): + self.runner = runner + self.host = host + self.has_pipelining = False + # port is unused, this go on func + self.port = port + + def connect(self, port=None): + if not HAVE_FUNC: + raise errors.AnsibleError("func is not installed") + + self.client = fc.Client(self.host) + return self + + def exec_command(self, cmd, become_user=None, sudoable=False, + executable='/bin/sh', in_data=None): + ''' run a command on the remote minion ''' + + if in_data: + raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + + # totally ignores privlege escalation + vvv("EXEC %s" % (cmd), host=self.host) + p = self.client.command.run(cmd)[self.host] + return (p[0], p[1], p[2]) + + def _normalize_path(self, path, prefix): + if not path.startswith(os.path.sep): + path = os.path.join(os.path.sep, path) + normpath = os.path.normpath(path) + return os.path.join(prefix, normpath[1:]) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to remote ''' + + out_path = self._normalize_path(out_path, '/') + vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self.client.local.copyfile.send(in_path, out_path) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from remote to local ''' + + in_path = self._normalize_path(in_path, '/') + vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + # need to use 
a tmp dir due to difference of semantic for getfile + # ( who take a # directory as destination) and fetch_file, who + # take a file directly + tmpdir = tempfile.mkdtemp(prefix="func_ansible") + self.client.local.getfile.get(in_path, tmpdir) + shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), + out_path) + shutil.rmtree(tmpdir) + + def close(self): + ''' terminate the connection; nothing to do here ''' + pass From a19e083d33ae5ae59be358c9468a4318aca3174f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 13:52:01 -0800 Subject: [PATCH 226/590] Note that handlers inside of includes are not possible at the moment --- docsite/rst/playbooks_intro.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index e0f1aec5c1..28c809f013 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -386,6 +386,7 @@ won't need them for much else. * Handler names live in a global namespace. * If two handler tasks have the same name, only one will run. `* `_ + * You cannot notify a handler that is defined inside of an include Roles are described later on, but it's worthwhile to point out that: From a61387846d3e210181683a60df14c8e7cbf46893 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 7 Dec 2015 10:22:07 -0800 Subject: [PATCH 227/590] draft release documentation --- docsite/rst/developing_releases.rst | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 docsite/rst/developing_releases.rst diff --git a/docsite/rst/developing_releases.rst b/docsite/rst/developing_releases.rst new file mode 100644 index 0000000000..1eeb242121 --- /dev/null +++ b/docsite/rst/developing_releases.rst @@ -0,0 +1,48 @@ +Releases +======== + +.. contents:: Topics + :local: + +.. 
schedule:: + +Release Schedule +```````````````` +Ansible is on a 'flexible' 4 month release schedule, sometimes this can be extended if there is a major change that requires a longer cycle (i.e. 2.0 core rewrite). +Currently modules get released at the same time as the main Ansible repo, even though they are separated into ansible-modules-core and ansible-modules-extras. + +The major features and bugs fixed in a release should be reflected in the CHANGELOG.md, minor ones will be in the commit history (FIXME: add git exmaple to list). +When a fix/feature gets added to the `devel` branch it will be part of the next release, some bugfixes can be backported to previous releases and might be part of a minor point release if it is deemed necessary. + +Sometimes an RC can be extended by a few days if a bugfix makes a change that can have far reaching consequences, so users have enough time to find any new issues that may stem from this. + +.. methods:: + +Release methods +```````````````` + +Ansible normally goes through a 'release candidate', issuing an RC1 for a release, if no major bugs are discovered in it after 5 business days we'll get a final release. +Otherwise fixes will be applied and an RC2 will be provided for testing and if no bugs after 2 days, the final release will be made, iterating this last step and incrementing the candidate number as we find major bugs. + + +.. freezing:: + +Release feature freeze +`````````````````````` + +During the release candidate process, the focus will be on bugfixes that affect the RC, new features will be delayed while we try to produce a final version. Some bugfixes that are minor or don't affect the RC will also be postponed until after the release is finalized. + +.. 
seealso:: + + :doc:`developing_api` + Python API to Playbooks and Ad Hoc Task Execution + :doc:`developing_modules` + How to develop modules + :doc:`developing_plugins` + How to develop plugins + `Ansible Tower `_ + REST API endpoint and GUI for Ansible, syncs with dynamic inventory + `Development Mailing List `_ + Mailing list for development topics + `irc.freenode.net `_ + #ansible IRC chat channel From 2b363434514aa94aad145d2a6eacf4c1013490d8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 9 Dec 2015 17:57:52 -0500 Subject: [PATCH 228/590] Missed one place we were appending the incorrectly escaped item to raw params --- lib/ansible/parsing/splitter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index f24d8ecf9d..feb0cd2b34 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -86,7 +86,7 @@ def parse_kv(args, check_raw=False): # FIXME: make the retrieval of this list of shell/command # options a function, so the list is centralized if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'): - raw_params.append(x) + raw_params.append(orig_x) else: options[k.strip()] = unquote(v.strip()) else: From 30e729557f0056ec561288046e2aa933efe899b3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 16:43:24 -0800 Subject: [PATCH 229/590] Add first draft of porting guide for 2.0 --- docsite/rst/porting_guide_2.0.rst | 160 ++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 docsite/rst/porting_guide_2.0.rst diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst new file mode 100644 index 0000000000..9c26a4b161 --- /dev/null +++ b/docsite/rst/porting_guide_2.0.rst @@ -0,0 +1,160 @@ +Porting Guide +============= + + +Playbook +-------- + +* backslash escapes When specifying parameters in jinja2 expressions in YAML + dicts, backslashes sometimes needed 
to be escaped twice. This has been fixed + in 2.0.x so that escaping once works. The following example shows how + playbooks must be modified:: + + # Syntax in 1.9.x + - debug: + msg: "{{ 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') }}" + # Syntax in 2.0.x + - debug: + msg: "{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}" + + # Output: + "msg": "test1 1\\3" + +To make an escaped string that will work on all versions you have two options:: + +- debug: msg="{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}" + +uses key=value escaping which has not changed. The other option is to check for the ansible version:: + +"{{ (ansible_version|version_compare('ge', '2.0'))|ternary( 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') , 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') ) }}" + +* trailing newline When a string with a trailing newline was specified in the + playbook via yaml dict format, the trailing newline was stripped. When + specified in key=value format, the trailing newlines were kept. In v2, both + methods of specifying the string will keep the trailing newlines. If you + relied on the trailing newline being stripped, you can change your playbook + using the following as an example:: + + # Syntax in 1.9.x + vars: + message: > + Testing + some things + tasks: + - debug: + msg: "{{ message }}" + + # Syntax in 2.0.x + vars: + old_message: > + Testing + some things + message: "{{ old_messsage[:-1] }}" + - debug: + msg: "{{ message }}" + # Output + "msg": "Testing some things" + +* porting task includes + * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. + * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 + * variable precedence +* templating (variables in playbooks and template lookups) has improved with regard to keeping the original instead of turning everything into a string. 
+ If you need the old behavior, quote the value to pass it around as a string. + Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. + You can override the `null_representation` setting to an empty string in your config file by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. +* Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed. +* dnf module has been rewritten. Some minor changes in behavior may be observed. +* win_updates has been rewritten and works as expected now. + +Deprecated +---------- + +While all items listed here will show a deprecation warning message, they still work as they did in 1.9.x. Please note that they will be removed in 2.2 (Ansible always waits two major releases to remove a deprecated feature). + +* Bare variables in with_ loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. +* The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead. +* Undefined variables within a with_ loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. +* Using variables for task parameters is unsafe and will be removed in a future version. For example:: + + - hosts: localhost + gather_facts: no + vars: + debug_params: + msg: "hello there" + tasks: + - debug: "{{debug_params}}" + +* Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern. +* Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y]. +* Playbooks using privilege escalation should always use “become*” options rather than the old su*/sudo* options. +* The “short form” for vars_prompt is no longer supported. 
+For example:: + +vars_prompt: + variable_name: "Prompt string" + +* Specifying variables at the top level of a task include statement is no longer supported. For example:: + + - include: foo.yml + a: 1 + +Should now be:: + +- include: foo.yml + args: + a: 1 + +* Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only. +* Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’. +* Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. +For example:: + + - include: foo.yml tags=a,b,c + +Should be:: + + - include: foo.yml + tags: [a, b, c] + +* The first_available_file option on tasks has been deprecated. Users should use the with_first_found option or lookup (‘first_found’, …) plugin. + + +Porting plugins +=============== + +In ansible-1.9.x, you would generally copy an existing plugin to create a new one. Simply implementing the methods and attributes that the caller of the plugin expected made it a plugin of that type. In ansible-2.0, most plugins are implemented by subclassing a base class for each plugin type. This way the custom plugin does not need to contain methods which are not customized. + +.. note:: + +Lookup plugins +-------------- +* lookup plugins ; import version + + +Connection plugins +------------------ + +* connection plugins + +Action plugins +-------------- + +* action plugins + +Callback plugins +---------------- + +* callback plugins + +Connection plugins +------------------ + +* connection plugins + + +Porting custom scripts +====================== + +Custom scripts that used the ``ansible.runner.Runner`` API in 1.x have to be ported in 2.x. 
Please refer to: +https://github.com/ansible/ansible/blob/devel/docsite/rst/developing_api.rst From fe72fff57da967ff0e53c8026bcd94d67cdb59db Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Thu, 10 Dec 2015 01:58:17 +0100 Subject: [PATCH 230/590] Fix the markdown used for the Windows module section --- docsite/rst/developing_modules.rst | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index bdee4aa83d..fde4b5704b 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -538,24 +538,34 @@ Windows modules checklist #!powershell -then:: + then:: + -then:: + + then:: + # WANT_JSON # POWERSHELL_COMMON -then, to parse all arguments into a variable modules generally use:: + then, to parse all arguments into a variable modules generally use:: + $params = Parse-Args $args * Arguments: * Try and use state present and state absent like other modules * You need to check that all your mandatory args are present. You can do this using the builtin Get-AnsibleParam function. * Required arguments:: + $package = Get-AnsibleParam -obj $params -name name -failifempty $true + * Required arguments with name validation:: + $state = Get-AnsibleParam -obj $params -name "State" -ValidateSet "Present","Absent" -resultobj $resultobj -failifempty $true + * Optional arguments with name validation:: + $state = Get-AnsibleParam -obj $params -name "State" -default "Present" -ValidateSet "Present","Absent" + * the If "FailIfEmpty" is true, the resultobj parameter is used to specify the object returned to fail-json. You can also override the default message using $emptyattributefailmessage (for missing required attributes) and $ValidateSetErrorMessage (for attribute validation errors) * Look at existing modules for more examples of argument checking. 
@@ -586,7 +596,7 @@ Starting in 1.8 you can deprecate modules by renaming them with a preceding _, i _old_cloud.py, This will keep the module available but hide it from the primary docs and listing. You can also rename modules and keep an alias to the old name by using a symlink that starts with _. -This example allows the stat module to be called with fileinfo, making the following examples equivalent +This example allows the stat module to be called with fileinfo, making the following examples equivalent:: EXAMPLES = ''' ln -s stat.py _fileinfo.py From c20c1a6d490933fa2ec8961508735422f3a6adeb Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 10 Dec 2015 11:16:21 +0100 Subject: [PATCH 231/590] add depth option to ansible-pull Allows shallow checkouts in ansible-pull by adding `--depth 1` (or higher number) --- lib/ansible/cli/pull.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 593d601e8d..67e8925930 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -80,6 +80,8 @@ class PullCLI(CLI): help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') + self.parser.add_option('--depth', dest='depth', default=None, + help='Depth of checkout, shallow checkout if greater or equal 1 . Defaults to full checkout.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. 
' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -154,6 +156,10 @@ class PullCLI(CLI): if self.options.verify: repo_opts += ' verify_commit=yes' + + if self.options.depth: + repo_opts += ' depth=%s' % self.options.depth + path = module_loader.find_plugin(self.options.module_name) if path is None: From 6680cc7052dd4ef5bb166008a18a57e0f156df95 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Thu, 10 Dec 2015 08:04:06 -0500 Subject: [PATCH 232/590] allow custom callbacks with adhoc cli for scripting missing import of CallbackBase --- lib/ansible/cli/__init__.py | 3 ++- lib/ansible/cli/adhoc.py | 4 +++- lib/ansible/executor/task_queue_manager.py | 11 +++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index da1aabcc69..a934a3a8ee 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -66,7 +66,7 @@ class CLI(object): LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) # -S (chop long lines) -X (disable termcap init and de-init) - def __init__(self, args): + def __init__(self, args, callback=None): """ Base init method for all command line programs """ @@ -75,6 +75,7 @@ class CLI(object): self.options = None self.parser = None self.action = None + self.callback = callback def set_action(self): """ diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 3de0e55b7b..250241a848 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -158,7 +158,9 @@ class AdHocCLI(CLI): play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) - if self.options.one_line: + if self.callback: + cb = self.callback + elif self.options.one_line: cb = 'oneline' else: cb = 'minimal' diff --git 
a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 7411138293..e2b29a5282 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -34,6 +34,7 @@ from ansible.playbook.play_context import PlayContext from ansible.plugins import callback_loader, strategy_loader, module_loader from ansible.template import Templar from ansible.vars.hostvars import HostVars +from ansible.plugins.callback import CallbackBase try: from __main__ import display @@ -146,8 +147,14 @@ class TaskQueueManager: if self._stdout_callback is None: self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK - if self._stdout_callback not in callback_loader: - raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) + if isinstance(self._stdout_callback, CallbackBase): + self._callback_plugins.append(self._stdout_callback) + stdout_callback_loaded = True + elif isinstance(self._stdout_callback, basestring): + if self._stdout_callback not in callback_loader: + raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) + else: + raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin") for callback_plugin in callback_loader.all(class_only=True): if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: From 72f0679f685dc6c79fe80736d2ca72f6778b8e5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Warcho=C5=82?= Date: Thu, 10 Dec 2015 16:22:37 +0100 Subject: [PATCH 233/590] Explain how 'run_once' interacts with 'serial' --- docsite/rst/playbooks_delegation.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index c715adea36..fa808abb65 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -182,13 +182,18 @@ This can be optionally paired with 
"delegate_to" to specify an individual host t delegate_to: web01.example.org When "run_once" is not used with "delegate_to" it will execute on the first host, as defined by inventory, -in the group(s) of hosts targeted by the play. e.g. webservers[0] if the play targeted "hosts: webservers". +in the group(s) of hosts targeted by the play - e.g. webservers[0] if the play targeted "hosts: webservers". -This approach is similar, although more concise and cleaner than applying a conditional to a task such as:: +This approach is similar to applying a conditional to a task such as:: - command: /opt/application/upgrade_db.py when: inventory_hostname == webservers[0] +.. note:: + When used together with "serial", tasks marked as "run_once" will be ran on one host in *each* serial batch. + If it's crucial that the task is run only once regardless of "serial" mode, use + :code:`inventory_hostname == my_group_name[0]` construct. + .. _local_playbooks: Local Playbooks From 1dda8158ff9aa5240e89711c7279c3d072e0e57e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 10 Dec 2015 07:28:58 -0800 Subject: [PATCH 234/590] become_pass needs to be bytes when it is passed to ssh. 
Fixes #13240 --- lib/ansible/plugins/connection/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index debe36bd32..4251f8a63e 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -463,7 +463,7 @@ class Connection(ConnectionBase): if states[state] == 'awaiting_prompt': if self._flags['become_prompt']: display.debug('Sending become_pass in response to prompt') - stdin.write(self._play_context.become_pass + '\n') + stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass ))) self._flags['become_prompt'] = False state += 1 elif self._flags['become_success']: From bd9582d0721db3c6e5e24b08c747e02a6391a0a7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 10 Dec 2015 08:10:45 -0800 Subject: [PATCH 235/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0b5555b62c..0d23b3df52 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0b5555b62cd8d91fb4fa434217671f3acaebbf5a +Subproject commit 0d23b3df526875c8fc6edf94268f3aa850ec05f1 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index cbed642009..51813e0033 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit cbed642009497ddaf19b5f578ab6c78da1356eda +Subproject commit 51813e003331c3341b07c5cda33346cada537a3b From c402325085c129ce289c73a808d8d6ac68df096d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 10 Dec 2015 13:10:17 -0500 Subject: [PATCH 236/590] Fixing up docker integration tests a bit --- .../roles/test_docker/tasks/docker-tests.yml | 31 +++---------------- .../test_docker/tasks/registry-tests.yml | 11 ++----- 2 files changed, 8 insertions(+), 34 deletions(-) diff --git 
a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml index 33ffe6c70c..14e23f72dd 100644 --- a/test/integration/roles/test_docker/tasks/docker-tests.yml +++ b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -8,7 +8,6 @@ image: busybox state: present pull: missing - docker_api_version: "1.14" - name: Run a small script in busybox docker: @@ -17,22 +16,12 @@ pull: always command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True - docker_api_version: "1.14" - -- name: Get the docker container id - shell: "docker ps | grep busybox | awk '{ print $1 }'" - register: container_id - name: Get the docker container ip - shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" - register: container_ip - -- name: Pause a few moments because docker is not reliable - pause: - seconds: 40 + set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}" - name: Try to access the server - shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + shell: "echo 'world' | nc {{ container_ip }} 2000" register: docker_output - name: check that the script ran @@ -49,22 +38,12 @@ TEST: hello command: '/bin/sh -c "nc -l -p 2000 -e xargs -n1 echo $TEST"' detach: True - docker_api_version: "1.14" - -- name: Get the docker container id - shell: "docker ps | grep busybox | awk '{ print $1 }'" - register: container_id - name: Get the docker container ip - shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" - register: container_ip - -- name: Pause a few moments because docker is not reliable - pause: - seconds: 40 + set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}" - name: Try to access the server - shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + shell: "echo 'world' | nc {{ container_ip }} 2000" register: docker_output - name: check that the 
script ran @@ -73,7 +52,7 @@ - "'hello world' in docker_output.stdout_lines" - name: Remove containers - shell: "docker rm $(docker ps -aq)" + shell: "docker rm -f $(docker ps -aq)" - name: Remove all images from the local docker shell: "docker rmi -f $(docker images -q)" diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml index 57b4d25277..1ef330da5f 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -19,11 +19,8 @@ - name: Push docker image into the private registry command: "docker push localhost:5000/mine" -- name: Remove containers - shell: "docker rm $(docker ps -aq)" - - name: Remove all images from the local docker - shell: "docker rmi -f $(docker images -q)" + shell: "docker rmi -f {{image_id.stdout_lines[0]}}" - name: Get number of images in docker command: "docker images" @@ -41,7 +38,6 @@ state: present pull: missing insecure_registry: True - docker_api_version: "1.14" - name: Run a small script in the new image docker: @@ -51,7 +47,6 @@ command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True insecure_registry: True - docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" @@ -76,8 +71,9 @@ - name: Remove containers - shell: "docker rm $(docker ps -aq)" + shell: "docker rm -f $(docker ps -aq)" +- shell: docker images -q - name: Remove all images from the local docker shell: "docker rmi -f $(docker images -q)" @@ -157,7 +153,6 @@ state: running command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True - docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" From a6a58d6947912328fd48e26ea1335bd9314f0135 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Thu, 10 Dec 2015 16:39:27 -0500 Subject: [PATCH 237/590] fix default host for non vcd service types --- 
lib/ansible/module_utils/vca.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py index 56341ec555..ef89d54556 100644 --- a/lib/ansible/module_utils/vca.py +++ b/lib/ansible/module_utils/vca.py @@ -108,7 +108,10 @@ class VcaAnsibleModule(AnsibleModule): def create_instance(self): service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE) - host = self.params.get('host', LOGIN_HOST.get('service_type')) + if service_type == 'vcd': + host = self.params['host'] + else: + host = LOGIN_HOST[service_type] username = self.params['username'] version = self.params.get('api_version') From 37c4e9aee34df2f421942e86c8afd1fef2bee5f6 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Fri, 11 Dec 2015 07:11:48 +0530 Subject: [PATCH 238/590] Clean up debug logging around _low_level_execute_command We were logging the command to be executed many times, which made debug logs very hard to read. Now we do it only once. Also makes the logged ssh command line cut-and-paste-able (the lack of which has confused a number of people by now; the problem being that we pass the command as a single argument to execve(), so it doesn't need an extra level of quoting as it does when you try to run it by hand). 
--- lib/ansible/plugins/action/__init__.py | 25 ++++++------------------- lib/ansible/plugins/connection/ssh.py | 2 +- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 497143224a..154404e474 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -202,9 +202,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): tmp_mode = 0o755 cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) - display.debug("executing _low_level_execute_command to create the tmp path") result = self._low_level_execute_command(cmd, sudoable=False) - display.debug("done with creation of tmp path") # error handling on this seems a little aggressive? if result['rc'] != 0: @@ -249,9 +247,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): cmd = self._connection._shell.remove(tmp_path, recurse=True) # If we have gotten here we have a working ssh configuration. # If ssh breaks we could leave tmp directories out on the remote system. 
- display.debug("calling _low_level_execute_command to remove the tmp path") self._low_level_execute_command(cmd, sudoable=False) - display.debug("done removing the tmp path") def _transfer_data(self, remote_path, data): ''' @@ -286,9 +282,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): ''' cmd = self._connection._shell.chmod(mode, path) - display.debug("calling _low_level_execute_command to chmod the remote path") res = self._low_level_execute_command(cmd, sudoable=sudoable) - display.debug("done with chmod call") return res def _remote_checksum(self, path, all_vars): @@ -299,9 +293,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): python_interp = all_vars.get('ansible_python_interpreter', 'python') cmd = self._connection._shell.checksum(path, python_interp) - display.debug("calling _low_level_execute_command to get the remote checksum") data = self._low_level_execute_command(cmd, sudoable=True) - display.debug("done getting the remote checksum") try: data2 = data['stdout'].strip().splitlines()[-1] if data2 == u'': @@ -329,9 +321,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): expand_path = '~%s' % self._play_context.become_user cmd = self._connection._shell.expand_user(expand_path) - display.debug("calling _low_level_execute_command to expand the remote user path") data = self._low_level_execute_command(cmd, sudoable=False) - display.debug("done expanding the remote user path") #initial_fragment = utils.last_non_blank_line(data['stdout']) initial_fragment = data['stdout'].strip().splitlines()[-1] @@ -448,9 +438,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): # specified in the play, not the sudo_user sudoable = False - display.debug("calling _low_level_execute_command() for command %s" % cmd) res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data) - display.debug("_low_level_execute_command returned ok") if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if 
self._play_context.become and self._play_context.become_user != 'root': @@ -498,21 +486,20 @@ class ActionBase(with_metaclass(ABCMeta, object)): if executable is not None: cmd = executable + ' -c ' + cmd - display.debug("in _low_level_execute_command() (%s)" % (cmd,)) + display.debug("_low_level_execute_command(): starting") if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) - display.debug("no command, exiting _low_level_execute_command()") + display.debug("_low_level_execute_command(): no command, exiting") return dict(stdout='', stderr='') allow_same_user = C.BECOME_ALLOW_SAME_USER same_user = self._play_context.become_user == self._play_context.remote_user if sudoable and self._play_context.become and (allow_same_user or not same_user): - display.debug("using become for this command") + display.debug("_low_level_execute_command(): using become for this command") cmd = self._play_context.make_become_cmd(cmd, executable=executable) - display.debug("executing the command %s through the connection" % cmd) + display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.debug("command execution done: rc=%s" % (rc)) # stdout and stderr may be either a file-like or a bytes object. 
# Convert either one to a text type @@ -530,11 +517,11 @@ class ActionBase(with_metaclass(ABCMeta, object)): else: err = stderr - display.debug("stdout=%s, stderr=%s" % (stdout, stderr)) - display.debug("done with _low_level_execute_command() (%s)" % (cmd,)) if rc is None: rc = 0 + display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr)) + return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err) def _get_first_available_file(self, faf, of=None, searchdir='files'): diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 4251f8a63e..a2abcf20ae 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -319,7 +319,7 @@ class Connection(ConnectionBase): Starts the command and communicates with it until it ends. ''' - display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]] + display_cmd = map(pipes.quote, cmd) display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) # Start the given command. 
If we don't need to pipeline data, we can try From bd0f9a4afc8406f71d65c50cda35a43549998fc1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 10 Dec 2015 21:50:11 -0500 Subject: [PATCH 239/590] fix make complaint when git is not installed --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ac4c07f431..f62cffb2df 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ GIT_HASH := $(shell git log -n 1 --format="%h") GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.\/]//g') GITINFO = .$(GIT_HASH).$(GIT_BRANCH) else -GITINFO = '' +GITINFO = "" endif ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1) From 58072c92fb762881679c31d050d519ccd83cb209 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 09:32:19 -0500 Subject: [PATCH 240/590] removed 'bare' example in environment now shows how to use explicit templating --- docsite/rst/playbooks_environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_environment.rst b/docsite/rst/playbooks_environment.rst index da050f007d..f909bfcd6e 100644 --- a/docsite/rst/playbooks_environment.rst +++ b/docsite/rst/playbooks_environment.rst @@ -31,7 +31,7 @@ The environment can also be stored in a variable, and accessed like so:: tasks: - apt: name=cobbler state=installed - environment: proxy_env + environment: "{{proxy_env}}" You can also use it at a playbook level:: From d9e510b19273d6a495e6694b6930e49de80f9500 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 13:12:24 -0500 Subject: [PATCH 241/590] narrow down exception catching in block builds this was obscuring other errors and should have always been narrow scope --- lib/ansible/playbook/role/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 1c6b344a4f..f308954f52 100644 --- 
a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -150,7 +150,7 @@ class Role(Base, Become, Conditional, Taggable): current_when = getattr(self, 'when')[:] current_when.extend(role_include.when) setattr(self, 'when', current_when) - + current_tags = getattr(self, 'tags')[:] current_tags.extend(role_include.tags) setattr(self, 'tags', current_tags) @@ -174,7 +174,7 @@ class Role(Base, Become, Conditional, Taggable): if task_data: try: self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader) - except: + except AssertionError: raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data) handler_data = self._load_role_yaml('handlers') From 97554fc222628057d7f3255ce2caac8dfe5d783f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 11 Dec 2015 00:18:47 -0500 Subject: [PATCH 242/590] Fixing filter test for extract to use proper group --- test/integration/roles/test_filters/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index af6c5d49de..cb1549d3f7 100644 --- a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -77,4 +77,4 @@ - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last" - "'local' == ['localhost']|map('extract',hostvars,'ansible_connection')|list|first" - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first" - - "'ungrouped' == ['localhost']|map('extract',hostvars,['vars','group_names',0])|list|first" + - "'amazon' == ['localhost']|map('extract',hostvars,['vars','group_names',0])|list|first" From 7f7e730dea36dbb709b47c39ca1a28cb9f6cb3f1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 11 Dec 2015 14:55:44 -0500 Subject: [PATCH 243/590] Don't mark hosts failed if they've 
moved to a rescue portion of a block Fixes #13521 --- lib/ansible/plugins/strategy/__init__.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 15636b580d..91ca4e8638 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -30,6 +30,11 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable +<<<<<<< Updated upstream +======= +from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import WorkerProcess +>>>>>>> Stashed changes from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -202,8 +207,10 @@ class StrategyBase: [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts] else: iterator.mark_host_failed(host) - self._tqm._failed_hosts[host.name] = True - self._tqm._stats.increment('failures', host.name) + (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True) + if state.run_state != PlayIterator.ITERATING_RESCUE: + self._tqm._failed_hosts[host.name] = True + self._tqm._stats.increment('failures', host.name) else: self._tqm._stats.increment('ok', host.name) self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors) From de71171fc21a81a343eb28ed25472ef4aa17406c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 15:10:48 -0500 Subject: [PATCH 244/590] removed merge conflict --- lib/ansible/plugins/strategy/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 91ca4e8638..5d31a3dba8 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ 
b/lib/ansible/plugins/strategy/__init__.py @@ -30,11 +30,7 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable -<<<<<<< Updated upstream -======= from ansible.executor.play_iterator import PlayIterator -from ansible.executor.process.worker import WorkerProcess ->>>>>>> Stashed changes from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group From ae988ed753f69cb2a7bf115c7cee41e53f01ef3e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 15:35:57 -0500 Subject: [PATCH 245/590] avoid set to unique hosts to preserve order switched to using a list comp and set to still unique but keep expected order fixes #13522 --- lib/ansible/inventory/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 3c1331e706..95e193f381 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -194,7 +194,8 @@ class Inventory(object): if self._restriction is not None: hosts = [ h for h in hosts if h in self._restriction ] - HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) + seen = set() + HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)] return HOSTS_PATTERNS_CACHE[pattern_hash][:] From 120b9a7ac6274c54d091291587b0c9ec865905a1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 10 Dec 2015 18:03:25 -0500 Subject: [PATCH 246/590] Changing the way workers are forked --- bin/ansible | 1 + lib/ansible/executor/process/worker.py | 108 ++++++++------------- lib/ansible/executor/task_queue_manager.py | 31 +----- lib/ansible/plugins/strategy/__init__.py | 48 ++++----- lib/ansible/plugins/strategy/linear.py | 5 +- 5 files changed, 70 insertions(+), 123 deletions(-) diff --git a/bin/ansible b/bin/ansible index 
7e1aa01a93..627510a72e 100755 --- a/bin/ansible +++ b/bin/ansible @@ -60,6 +60,7 @@ if __name__ == '__main__': try: display = Display() + display.debug("starting run") sub = None try: diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index a1a83a5dda..73f5faa78b 100644 --- a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -59,14 +59,18 @@ class WorkerProcess(multiprocessing.Process): for reading later. ''' - def __init__(self, tqm, main_q, rslt_q, hostvars_manager, loader): + def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj): super(WorkerProcess, self).__init__() # takes a task queue manager as the sole param: - self._main_q = main_q - self._rslt_q = rslt_q - self._hostvars = hostvars_manager - self._loader = loader + self._rslt_q = rslt_q + self._task_vars = task_vars + self._host = host + self._task = task + self._play_context = play_context + self._loader = loader + self._variable_manager = variable_manager + self._shared_loader_obj = shared_loader_obj # dupe stdin, if we have one self._new_stdin = sys.stdin @@ -97,73 +101,45 @@ class WorkerProcess(multiprocessing.Process): if HAS_ATFORK: atfork() - while True: - task = None - try: - #debug("waiting for work") - (host, task, basedir, zip_vars, compressed_vars, play_context, shared_loader_obj) = self._main_q.get(block=False) + try: + # execute the task and build a TaskResult from the result + debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) + executor_result = TaskExecutor( + self._host, + self._task, + self._task_vars, + self._play_context, + self._new_stdin, + self._loader, + self._shared_loader_obj, + ).run() - if compressed_vars: - job_vars = json.loads(zlib.decompress(zip_vars)) - else: - job_vars = zip_vars + debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) + self._host.vars = dict() + self._host.groups = [] + task_result = 
TaskResult(self._host, self._task, executor_result) - job_vars['hostvars'] = self._hostvars.hostvars() + # put the result on the result queue + debug("sending task result") + self._rslt_q.put(task_result) + debug("done sending task result") - debug("there's work to be done! got a task/handler to work on: %s" % task) + except AnsibleConnectionFailure: + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, dict(unreachable=True)) + self._rslt_q.put(task_result, block=False) - # because the task queue manager starts workers (forks) before the - # playbook is loaded, set the basedir of the loader inherted by - # this fork now so that we can find files correctly - self._loader.set_basedir(basedir) - - # Serializing/deserializing tasks does not preserve the loader attribute, - # since it is passed to the worker during the forking of the process and - # would be wasteful to serialize. So we set it here on the task now, and - # the task handles updating parent/child objects as needed. 
- task.set_loader(self._loader) - - # execute the task and build a TaskResult from the result - debug("running TaskExecutor() for %s/%s" % (host, task)) - executor_result = TaskExecutor( - host, - task, - job_vars, - play_context, - self._new_stdin, - self._loader, - shared_loader_obj, - ).run() - debug("done running TaskExecutor() for %s/%s" % (host, task)) - task_result = TaskResult(host, task, executor_result) - - # put the result on the result queue - debug("sending task result") - self._rslt_q.put(task_result) - debug("done sending task result") - - except queue.Empty: - time.sleep(0.0001) - except AnsibleConnectionFailure: + except Exception as e: + if not isinstance(e, (IOError, EOFError, KeyboardInterrupt)) or isinstance(e, TemplateNotFound): try: - if task: - task_result = TaskResult(host, task, dict(unreachable=True)) - self._rslt_q.put(task_result, block=False) + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, dict(failed=True, exception=traceback.format_exc(), stdout='')) + self._rslt_q.put(task_result, block=False) except: - break - except Exception as e: - if isinstance(e, (IOError, EOFError, KeyboardInterrupt)) and not isinstance(e, TemplateNotFound): - break - else: - try: - if task: - task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout='')) - self._rslt_q.put(task_result, block=False) - except: - debug("WORKER EXCEPTION: %s" % e) - debug("WORKER EXCEPTION: %s" % traceback.format_exc()) - break + debug("WORKER EXCEPTION: %s" % e) + debug("WORKER EXCEPTION: %s" % traceback.format_exc()) debug("WORKER PROCESS EXITING") - diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index e2b29a5282..9189ab9581 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -102,11 +102,7 @@ class TaskQueueManager: for i in xrange(num): main_q = multiprocessing.Queue() rslt_q 
= multiprocessing.Queue() - - prc = WorkerProcess(self, main_q, rslt_q, self._hostvars_manager, self._loader) - prc.start() - - self._workers.append((prc, main_q, rslt_q)) + self._workers.append([None, main_q, rslt_q]) self._result_prc = ResultProcess(self._final_q, self._workers) self._result_prc.start() @@ -195,31 +191,12 @@ class TaskQueueManager: new_play = play.copy() new_play.post_validate(templar) - class HostVarsManager(SyncManager): - pass - - hostvars = HostVars( + self.hostvars = HostVars( inventory=self._inventory, variable_manager=self._variable_manager, loader=self._loader, ) - HostVarsManager.register( - 'hostvars', - callable=lambda: hostvars, - # FIXME: this is the list of exposed methods to the DictProxy object, plus our - # special ones (set_variable_manager/set_inventory). There's probably a better way - # to do this with a proper BaseProxy/DictProxy derivative - exposed=( - 'set_variable_manager', 'set_inventory', '__contains__', '__delitem__', - 'set_nonpersistent_facts', 'set_host_facts', 'set_host_variable', - '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', - 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' - ), - ) - self._hostvars_manager = HostVarsManager() - self._hostvars_manager.start() - # Fork # of forks, # of hosts or serial, whichever is lowest contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))] contenders = [ v for v in contenders if v is not None and v > 0 ] @@ -259,7 +236,6 @@ class TaskQueueManager: # and run the play using the strategy and cleanup on way out play_return = strategy.run(iterator, play_context) self._cleanup_processes() - self._hostvars_manager.shutdown() return play_return def cleanup(self): @@ -275,7 +251,8 @@ class TaskQueueManager: for (worker_prc, main_q, rslt_q) in self._workers: rslt_q.close() main_q.close() - worker_prc.terminate() + if worker_prc and worker_prc.is_alive(): + worker_prc.terminate() def 
clear_failed_hosts(self): self._failed_hosts = dict() diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 5d31a3dba8..ea30b800b0 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -31,6 +31,7 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -138,38 +139,29 @@ class StrategyBase: display.debug("entering _queue_task() for %s/%s" % (host, task)) + task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) try: display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers))) - (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] - self._cur_worker += 1 - if self._cur_worker >= len(self._workers): - self._cur_worker = 0 - # create a dummy object with plugin loaders set as an easier # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() - # compress (and convert) the data if so configured, which can - # help a lot when the variable dictionary is huge. 
We pop the - # hostvars out of the task variables right now, due to the fact - # that they're not JSON serializable - compressed_vars = False - if C.DEFAULT_VAR_COMPRESSION_LEVEL > 0: - zip_vars = zlib.compress(json.dumps(task_vars), C.DEFAULT_VAR_COMPRESSION_LEVEL) - compressed_vars = True - # we're done with the original dict now, so delete it to - # try and reclaim some memory space, which is helpful if the - # data contained in the dict is very large - del task_vars - else: - zip_vars = task_vars # noqa (pyflakes false positive because task_vars is deleted in the conditional above) - - # and queue the task - main_q.put((host, task, self._loader.get_basedir(), zip_vars, compressed_vars, play_context, shared_loader_obj)) + while True: + (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] + if worker_prc is None or not worker_prc.is_alive(): + worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) + self._workers[self._cur_worker][0] = worker_prc + worker_prc.start() + break + self._cur_worker += 1 + if self._cur_worker >= len(self._workers): + self._cur_worker = 0 + time.sleep(0.0001) + del task_vars self._pending_results += 1 except (EOFError, IOError, AssertionError) as e: # most likely an abort @@ -177,7 +169,7 @@ class StrategyBase: return display.debug("exiting _queue_task() for %s/%s" % (host, task)) - def _process_pending_results(self, iterator): + def _process_pending_results(self, iterator, one_pass=False): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). 
@@ -247,13 +239,11 @@ class StrategyBase: new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) - self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'add_group': host = result[1] result_item = result[2] self._add_group(host, result_item) - self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'notify_handler': task_result = result[1] @@ -283,7 +273,6 @@ class StrategyBase: for target_host in host_list: self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value}) - self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(target_host, {var_name: var_value}) elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] @@ -316,21 +305,22 @@ class StrategyBase: for target_host in host_list: self._variable_manager.set_host_variable(target_host, var_name, var_value) - self._tqm._hostvars_manager.hostvars().set_host_variable(target_host, var_name, var_value) elif result[0] == 'set_host_facts': facts = result[4] if task.action == 'set_fact': self._variable_manager.set_nonpersistent_facts(actual_host, facts) - self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(actual_host, facts) else: self._variable_manager.set_host_facts(actual_host, facts) - self._tqm._hostvars_manager.hostvars().set_host_facts(actual_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) + except Queue.Empty: time.sleep(0.0001) + if one_pass: + break + return ret_results def _wait_on_pending_results(self, iterator): diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 8a8d5c084a..8c94267cf4 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -169,6 +169,7 @@ class StrategyModule(StrategyBase): skip_rest = False choose_step = True + results = [] for (host, task) in host_tasks: if not task: continue @@ -243,12 +244,14 @@ 
class StrategyModule(StrategyBase): if run_once: break + results += self._process_pending_results(iterator, one_pass=True) + # go to next host/task group if skip_rest: continue display.debug("done queuing things up, now waiting for results queue to drain") - results = self._wait_on_pending_results(iterator) + results += self._wait_on_pending_results(iterator) host_results.extend(results) if not work_to_do and len(iterator.get_failed_hosts()) > 0: From 8db291274519331ed186f0b9dc0711f6754cb25d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 12:59:00 -0500 Subject: [PATCH 247/590] corrected section anchors --- docsite/rst/developing_releases.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/developing_releases.rst b/docsite/rst/developing_releases.rst index 1eeb242121..2332459c30 100644 --- a/docsite/rst/developing_releases.rst +++ b/docsite/rst/developing_releases.rst @@ -4,7 +4,7 @@ Releases .. contents:: Topics :local: -.. schedule:: +.. _schedule: Release Schedule ```````````````` @@ -16,7 +16,7 @@ When a fix/feature gets added to the `devel` branch it will be part of the next Sometimes an RC can be extended by a few days if a bugfix makes a change that can have far reaching consequences, so users have enough time to find any new issues that may stem from this. -.. methods:: +.. _methods: Release methods ```````````````` @@ -25,7 +25,7 @@ Ansible normally goes through a 'release candidate', issuing an RC1 for a releas Otherwise fixes will be applied and an RC2 will be provided for testing and if no bugs after 2 days, the final release will be made, iterating this last step and incrementing the candidate number as we find major bugs. -.. freezing:: +.. 
_freezing: Release feature freeze `````````````````````` From 0a112a1b0617d4087ae3e46ea031101af204d48e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:14:14 -0500 Subject: [PATCH 248/590] fixed formating issues with rst --- docsite/rst/porting_guide_2.0.rst | 44 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 9c26a4b161..8d69ecd440 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -56,12 +56,11 @@ uses key=value escaping which has not changed. The other option is to check for "msg": "Testing some things" * porting task includes - * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. - * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 - * variable precedence +* More dynamic. Corner-case formats that were not supposed to work now do not, as expected. +* variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 * templating (variables in playbooks and template lookups) has improved with regard to keeping the original instead of turning everything into a string. - If you need the old behavior, quote the value to pass it around as a string. - Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. + If you need the old behavior, quote the value to pass it around as a string. +* Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. You can override the `null_representation` setting to an empty string in your config file by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. * Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed. 
* dnf module has been rewritten. Some minor changes in behavior may be observed. @@ -72,26 +71,26 @@ Deprecated While all items listed here will show a deprecation warning message, they still work as they did in 1.9.x. Please note that they will be removed in 2.2 (Ansible always waits two major releases to remove a deprecated feature). -* Bare variables in with_ loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. +* Bare variables in `with_` loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. * The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead. -* Undefined variables within a with_ loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. +* Undefined variables within a `with_` loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. * Using variables for task parameters is unsafe and will be removed in a future version. For example:: - hosts: localhost - gather_facts: no - vars: - debug_params: - msg: "hello there" - tasks: - - debug: "{{debug_params}}" + gather_facts: no + vars: + debug_params: + msg: "hello there" + tasks: + - debug: "{{debug_params}}" * Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern. * Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y]. * Playbooks using privilege escalation should always use “become*” options rather than the old su*/sudo* options. -* The “short form” for vars_prompt is no longer supported. -For example:: +* The “short form” for vars_prompt is no longer supported. + For example:: -vars_prompt: + vars_prompt: variable_name: "Prompt string" * Specifying variables at the top level of a task include statement is no longer supported. 
For example:: @@ -101,21 +100,21 @@ vars_prompt: Should now be:: -- include: foo.yml - args: - a: 1 + - include: foo.yml + args: + a: 1 * Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only. * Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’. * Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. -For example:: + For example:: - include: foo.yml tags=a,b,c -Should be:: + Should be:: - include: foo.yml - tags: [a, b, c] + tags: [a, b, c] * The first_available_file option on tasks has been deprecated. Users should use the with_first_found option or lookup (‘first_found’, …) plugin. @@ -125,7 +124,6 @@ Porting plugins In ansible-1.9.x, you would generally copy an existing plugin to create a new one. Simply implementing the methods and attributes that the caller of the plugin expected made it a plugin of that type. In ansible-2.0, most plugins are implemented by subclassing a base class for each plugin type. This way the custom plugin does not need to contain methods which are not customized. -.. note:: Lookup plugins -------------- From d7b516f75dc879ad350b285e7ddc398418bf85fd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:16:40 -0500 Subject: [PATCH 249/590] added releases doc --- docsite/rst/developing.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/developing.rst b/docsite/rst/developing.rst index 2a25899301..c5a1dca061 100644 --- a/docsite/rst/developing.rst +++ b/docsite/rst/developing.rst @@ -11,6 +11,7 @@ Learn how to build modules of your own in any language, and also how to extend A developing_modules developing_plugins developing_test_pr + developing_releases Developers will also likely be interested in the fully-discoverable in :doc:`tower`. 
It's great for embedding Ansible in all manner of applications. From 8e445c551a23f52e901c9b1d2603e496a2e88c11 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:43:10 -0500 Subject: [PATCH 250/590] removed unused imports in galaxy/cli --- lib/ansible/cli/galaxy.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 01e0475b24..0f9074da93 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -25,7 +25,6 @@ __metaclass__ = type import os.path import sys import yaml -import json import time from collections import defaultdict @@ -40,7 +39,6 @@ from ansible.galaxy.role import GalaxyRole from ansible.galaxy.login import GalaxyLogin from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement -from ansible.module_utils.urls import open_url try: from __main__ import display @@ -61,10 +59,10 @@ class GalaxyCLI(CLI): "remove": "delete a role from your roles path", "search": "query the Galaxy API", "setup": "add a TravisCI integration to Galaxy", - } + } SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + def __init__(self, args): self.VALID_ACTIONS = self.available_commands.keys() self.VALID_ACTIONS.sort() @@ -101,7 +99,7 @@ class GalaxyCLI(CLI): usage = "usage: %%prog [%s] [--help] [options] ..." 
% "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - + self.set_action() # options specific to actions @@ -131,7 +129,7 @@ class GalaxyCLI(CLI): self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') + help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": @@ -190,7 +188,7 @@ class GalaxyCLI(CLI): # if not offline, get connect to galaxy api if self.action in ("import","info","install","search","login","setup","delete") or \ - (self.action == 'init' and not self.options.offline): + (self.action == 'init' and not self.options.offline): self.api = GalaxyAPI(self.galaxy) self.execute() @@ -544,7 +542,7 @@ class GalaxyCLI(CLI): def execute_search(self): page_size = 1000 search = None - + if len(self.args): terms = [] for i in range(len(self.args)): @@ -556,7 +554,7 @@ class GalaxyCLI(CLI): response = self.api.search_roles(search, platforms=self.options.platforms, tags=self.options.tags, author=self.options.author, page_size=page_size) - + if response['count'] == 0: display.display("No roles match your search.", color="yellow") return True @@ -578,7 +576,7 @@ class GalaxyCLI(CLI): data += (format_str % ("----", "-----------")) for role in response['results']: data += (format_str % (role['username'] + '.' 
+ role['name'],role['description'])) - + self.pager(data) return True @@ -595,12 +593,12 @@ class GalaxyCLI(CLI): github_token = self.options.token galaxy_response = self.api.authenticate(github_token) - + if self.options.token is None: # Remove the token we created login.remove_github_token() - - # Store the Galaxy token + + # Store the Galaxy token token = GalaxyToken() token.set(galaxy_response['token']) @@ -611,7 +609,7 @@ class GalaxyCLI(CLI): """ Import a role into Galaxy """ - + colors = { 'INFO': 'normal', 'WARNING': 'yellow', @@ -631,7 +629,7 @@ class GalaxyCLI(CLI): else: # Submit an import request task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) - + if len(task) > 1: # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo), @@ -693,7 +691,7 @@ class GalaxyCLI(CLI): if len(self.args) < 4: raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") return 0 - + secret = self.args.pop() github_repo = self.args.pop() github_user = self.args.pop() @@ -711,7 +709,7 @@ class GalaxyCLI(CLI): if len(self.args) < 2: raise AnsibleError("Missing one or more arguments. 
Expected: github_user github_repo") - + github_repo = self.args.pop() github_user = self.args.pop() resp = self.api.delete_role(github_user, github_repo) @@ -722,9 +720,8 @@ class GalaxyCLI(CLI): display.display("------ --------------- ----------") for role in resp['deleted_roles']: display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) - + display.display(resp['status']) return True - From 3c4d2fc6f2cdeba074511fb591134014cf77032d Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 12 Dec 2015 19:31:19 +0100 Subject: [PATCH 251/590] Add tests for ansible.module_utils.known_hosts --- .../module_utils/basic/test_known_hosts.py | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 test/units/module_utils/basic/test_known_hosts.py diff --git a/test/units/module_utils/basic/test_known_hosts.py b/test/units/module_utils/basic/test_known_hosts.py new file mode 100644 index 0000000000..952184bfec --- /dev/null +++ b/test/units/module_utils/basic/test_known_hosts.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Michael Scherer +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible.compat.tests import unittest +from ansible.module_utils import known_hosts + +class TestAnsibleModuleKnownHosts(unittest.TestCase): + urls = { + 'ssh://one.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'one.example.org'}, + 'ssh+git://two.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'two.example.org'}, + 'rsync://three.example.org/user/example.git': + {'is_ssh_url': False, 'get_fqdn': 'three.example.org'}, + 'git@four.example.org:user/example.git': + {'is_ssh_url': True, 'get_fqdn': 'four.example.org'}, + 'git+ssh://five.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'five.example.org'}, + 'ssh://six.example.org:21/example.org': + {'is_ssh_url': True, 'get_fqdn': 'six.example.org'}, + } + + def test_is_ssh_url(self): + for u in self.urls: + self.assertEqual(known_hosts.is_ssh_url(u), self.urls[u]['is_ssh_url']) + + def test_get_fqdn(self): + for u in self.urls: + self.assertEqual(known_hosts.get_fqdn(u), self.urls[u]['get_fqdn']) + + + From 99e46440bdaf622958f78cebecb52dec7ed67669 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 16:10:18 -0500 Subject: [PATCH 252/590] changed shell delimiters for csh fixes #13459 --- lib/ansible/plugins/shell/csh.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py index 1c383d133c..bd210f12fe 100644 --- a/lib/ansible/plugins/shell/csh.py +++ b/lib/ansible/plugins/shell/csh.py @@ -24,6 +24,8 @@ class ShellModule(ShModule): # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\\\n' _SHELL_REDIRECT_ALLNULL = '>& /dev/null' + _SHELL_SUB_LEFT = '"`' + _SHELL_SUB_RIGHT = '`"' def env_prefix(self, **kwargs): return 'env %s' % super(ShellModule, self).env_prefix(**kwargs) From f3bedbae2991b540421d64f5be942ec7c84fdf7d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 17:50:55 -0500 Subject: [PATCH 253/590] simplified skippy thanks agaffney! 
--- lib/ansible/plugins/callback/skippy.py | 159 +------------------------ 1 file changed, 6 insertions(+), 153 deletions(-) diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py index 495943417f..306d1a534e 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ b/lib/ansible/plugins/callback/skippy.py @@ -19,10 +19,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible import constants as C -from ansible.plugins.callback import CallbackBase +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default -class CallbackModule(CallbackBase): +class CallbackModule(CallbackModule_default): ''' This is the default callback interface, which simply prints messages @@ -33,154 +32,8 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'skippy' - def v2_runner_on_failed(self, result, ignore_errors=False): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if 'exception' in result._result: - if self._display.verbosity < 3: - # extract just the actual error message from the exception text - error = result._result['exception'].strip().split('\n')[-1] - msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error - else: - msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - - self._display.display(msg, color='red') - - # finally, remove the exception from the result so it's not shown every time - del result._result['exception'] - - if result._task.loop and 'results' in result._result: - self._process_items(result) - else: - if delegated_vars: - self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') - else: - self._display.display("fatal: [%s]: FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') - - if result._task.ignore_errors: - self._display.display("...ignoring", color='cyan') - - def v2_runner_on_ok(self, result): - - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if result._task.action == 'include': - return - elif result._result.get('changed', False): - if delegated_vars: - msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' - else: - if delegated_vars: - msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "ok: [%s]" % result._host.get_name() - color = 'green' - - if result._task.loop and 'results' in result._result: - self._process_items(result) - else: - - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: - msg += " => %s" % (self._dump_results(result._result),) - self._display.display(msg, color=color) - - self._handle_warnings(result._result) - - def v2_runner_on_unreachable(self, result): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if delegated_vars: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') - else: - self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') - - def v2_playbook_on_no_hosts_matched(self): - self._display.display("skipping: no hosts matched", color='cyan') - - def v2_playbook_on_no_hosts_remaining(self): - self._display.banner("NO MORE HOSTS LEFT") - - def v2_playbook_on_task_start(self, task, is_conditional): - self._display.banner("TASK [%s]" % task.get_name().strip()) - if self._display.verbosity > 2: - path = task.get_path() - if path: - self._display.display("task path: %s" % path, color='dark gray') - - def v2_playbook_on_cleanup_task_start(self, task): - self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) - - def v2_playbook_on_handler_task_start(self, task): - self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) - - def v2_playbook_on_play_start(self, play): - name = play.get_name().strip() - if not name: - msg = "PLAY" - else: - msg = "PLAY [%s]" % name - - self._display.banner(msg) - - def v2_on_file_diff(self, result): - if result._task.loop and 'results' in result._result: - for res in result._result['results']: - newres = self._copy_result(result) - res['item'] = self._get_item(res) - newres._result = res - - self.v2_on_file_diff(newres) - elif 'diff' in result._result and result._result['diff']: - self._display.display(self._get_diff(result._result['diff'])) - - def v2_playbook_item_on_ok(self, result): - - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if result._task.action == 'include': - return - elif result._result.get('changed', False): - if delegated_vars: - msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' - else: - if delegated_vars: - msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "ok: [%s]" % result._host.get_name() - color = 'green' - - msg += " => (item=%s)" % 
(result._result['item'],) - - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: - msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color=color) - - def v2_playbook_item_on_failed(self, result): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if 'exception' in result._result: - if self._display.verbosity < 3: - # extract just the actual error message from the exception text - error = result._result['exception'].strip().split('\n')[-1] - msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error - else: - msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - - self._display.display(msg, color='red') - - # finally, remove the exception from the result so it's not shown every time - del result._result['exception'] - - if delegated_vars: - self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red') - else: - self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') - - self._handle_warnings(result._result) - - def v2_playbook_on_include(self, included_file): - msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) - color = 'cyan' - self._display.display(msg, color='cyan') + def v2_runner_on_skipped(self, result): + pass + def v2_playbook_item_on_skipped(self, result): + pass From d73562902b289e7fd7e2e5a37e82b00c83a16369 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 13 Dec 2015 00:13:13 -0500 Subject: [PATCH 254/590] debug now validates its params simplified var handling made default message the same as in pre 2.0 fixes #13532 
--- lib/ansible/plugins/action/debug.py | 35 ++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index a0ffb71404..2af20eddfc 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -20,40 +20,45 @@ __metaclass__ = type from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.unicode import to_unicode +from ansible.errors import AnsibleUndefinedVariable class ActionModule(ActionBase): ''' Print statements during execution ''' TRANSFERS_FILES = False + VALID_ARGS = set(['msg', 'var']) def run(self, tmp=None, task_vars=None): if task_vars is None: task_vars = dict() + for arg in self._task.args: + if arg not in self.VALID_ARGS: + return {"failed": True, "msg": "'%s' is not a valid option in debug" % arg} + + if 'msg' in self._task.args and 'var' in self._task.args: + return {"failed": True, "msg": "'msg' and 'var' are incompatible options"} + result = super(ActionModule, self).run(tmp, task_vars) if 'msg' in self._task.args: - if 'fail' in self._task.args and boolean(self._task.args['fail']): - result['failed'] = True - result['msg'] = self._task.args['msg'] - else: - result['msg'] = self._task.args['msg'] - # FIXME: move the LOOKUP_REGEX somewhere else - elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=False) + result['msg'] = self._task.args['msg'] + + elif 'var' in self._task.args: + try: + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True) + if results == self._task.args['var']: + raise AnsibleUndefinedVariable + except AnsibleUndefinedVariable: + results = "VARIABLE IS NOT DEFINED!" 
+ if type(self._task.args['var']) in (list, dict): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results else: - # If var name is same as result, try to template it - if results == self._task.args['var']: - try: - results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True) - except: - results = "VARIABLE IS NOT DEFINED!" result[self._task.args['var']] = results else: - result['msg'] = 'here we are' + result['msg'] = 'Hello world!' # force flag to make debug output module always verbose result['_ansible_verbose_always'] = True From e2ad4fe9100729462fbd511c75a035ccdfd41841 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 13 Dec 2015 00:34:23 -0500 Subject: [PATCH 255/590] include all packaging in tarball not juse rpm spec file --- MANIFEST.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index d8402f0297..64c5bf1fcb 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,12 +4,13 @@ prune ticket_stubs prune packaging prune test prune hacking -include README.md packaging/rpm/ansible.spec COPYING +include README.md COPYING include examples/hosts include examples/ansible.cfg include lib/ansible/module_utils/powershell.ps1 recursive-include lib/ansible/modules * recursive-include docs * +recursive-include packaging * include Makefile include VERSION include MANIFEST.in From 4779f29777872f1352c65ea504eb81e998a47b7b Mon Sep 17 00:00:00 2001 From: Usman Ehtesham Gul Date: Sun, 13 Dec 2015 01:24:27 -0500 Subject: [PATCH 256/590] Fix Doc mistake Fix Doc mistake in ansible/docsite/rst/playbooks_variables.rst --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 307387a72e..122c0ef923 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -796,7 +796,7 @@ 
Basically, anything that goes into "role defaults" (the defaults folder inside t .. [1] Tasks in each role will see their own role's defaults. Tasks defined outside of a role will see the last role's defaults. .. [2] Variables defined in inventory file or provided by dynamic inventory. -.. note:: Within a any section, redefining a var will overwrite the previous instance. +.. note:: Within any section, redefining a var will overwrite the previous instance. If multiple groups have the same variable, the last one loaded wins. If you define a variable twice in a play's vars: section, the 2nd one wins. .. note:: the previous describes the default config `hash_behavior=replace`, switch to 'merge' to only partially overwrite. From 1b2ebe8defddbb6f6cd471f999d6eba8b78f1446 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Sun, 13 Dec 2015 10:56:47 +0100 Subject: [PATCH 257/590] make shallow clone the default for ansibel-pull --- lib/ansible/cli/pull.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 67e8925930..7b2fd13e5e 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -80,8 +80,8 @@ class PullCLI(CLI): help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') - self.parser.add_option('--depth', dest='depth', default=None, - help='Depth of checkout, shallow checkout if greater or equal 1 . Defaults to full checkout.') + self.parser.add_option('--full', dest='fullclone', action='store_true', + help='Do a full clone, instead of a shallow one.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. 
' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -157,8 +157,8 @@ class PullCLI(CLI): if self.options.verify: repo_opts += ' verify_commit=yes' - if self.options.depth: - repo_opts += ' depth=%s' % self.options.depth + if not self.options.fullclone: + repo_opts += ' depth=1' path = module_loader.find_plugin(self.options.module_name) From 1bd8d97093f30e4848640a5c43a7f830a9112e2f Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Sun, 13 Dec 2015 11:19:50 +0100 Subject: [PATCH 258/590] fix whitespace --- lib/ansible/cli/pull.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 7b2fd13e5e..2571717766 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -156,7 +156,7 @@ class PullCLI(CLI): if self.options.verify: repo_opts += ' verify_commit=yes' - + if not self.options.fullclone: repo_opts += ' depth=1' From d8e6bc98a2494628aca2fc406655dce70701f525 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 17:09:34 -0500 Subject: [PATCH 259/590] Fix overloaded options. Show an error when no action given. Don't show a helpful list of commands and descriptions. 
--- lib/ansible/cli/galaxy.py | 68 ++++++++------------------------------- 1 file changed, 13 insertions(+), 55 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 0f9074da93..13df7c4122 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -48,50 +48,14 @@ except ImportError: class GalaxyCLI(CLI): - available_commands = { - "delete": "remove a role from Galaxy", - "import": "add a role contained in a GitHub repo to Galaxy", - "info": "display details about a particular role", - "init": "create a role directory structure in your roles path", - "install": "download a role into your roles path", - "list": "enumerate roles found in your roles path", - "login": "authenticate with Galaxy API and store the token", - "remove": "delete a role from your roles path", - "search": "query the Galaxy API", - "setup": "add a TravisCI integration to Galaxy", - } - SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + VALID_ACTIONS = ("delete","import","info","init","install","list","login","remove","search","setup") + def __init__(self, args): - self.VALID_ACTIONS = self.available_commands.keys() - self.VALID_ACTIONS.sort() self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) - def set_action(self): - """ - Get the action the user wants to execute from the sys argv list. 
- """ - for i in range(0,len(self.args)): - arg = self.args[i] - if arg in self.VALID_ACTIONS: - self.action = arg - del self.args[i] - break - - if not self.action: - self.show_available_actions() - - def show_available_actions(self): - # list available commands - display.display(u'\n' + "usage: ansible-galaxy COMMAND [--help] [options] ...") - display.display(u'\n' + "availabe commands:" + u'\n\n') - for key in self.VALID_ACTIONS: - display.display(u'\t' + "%-12s %s" % (key, self.available_commands[key])) - display.display(' ') - def parse(self): ''' create an options parser for bin/ansible ''' @@ -107,11 +71,11 @@ class GalaxyCLI(CLI): self.parser.set_usage("usage: %prog delete [options] github_user github_repo") elif self.action == "import": self.parser.set_usage("usage: %prog import [options] github_user github_repo") - self.parser.add_option('-n', '--no-wait', dest='wait', action='store_false', default=True, + self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.') - self.parser.add_option('-b', '--branch', dest='reference', + self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. 
Defaults to the repository\'s default branch (usually master)') - self.parser.add_option('-t', '--status', dest='check_status', action='store_true', default=False, + self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.') elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") @@ -147,15 +111,14 @@ class GalaxyCLI(CLI): help='GitHub username') self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") elif self.action == "setup": - self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret" + - u'\n\n' + "Create an integration with travis.") - self.parser.add_option('-r', '--remove', dest='remove_id', default=None, + self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret") + self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.') - self.parser.add_option('-l', '--list', dest="setup_list", action='store_true', default=False, + self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.') # options that apply to more than one action - if not self.action in ("config","import","init","login","setup"): + if not self.action in ("import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. 
' 'The default is the roles_path configured in your ' @@ -171,19 +134,14 @@ class GalaxyCLI(CLI): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') - if self.action: - # get options, args and galaxy object - self.options, self.args =self.parser.parse_args() - display.verbosity = self.options.verbosity - self.galaxy = Galaxy(self.options) + self.options, self.args =self.parser.parse_args() + display.verbosity = self.options.verbosity + self.galaxy = Galaxy(self.options) return True def run(self): - - if not self.action: - return True - + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api From 989604b1a3977e6246f997d1a75aaf97776b28ae Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 17:12:53 -0500 Subject: [PATCH 260/590] Fix typo. --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 783ac15e45..c9dea27336 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -126,7 +126,7 @@ The above will create the following directory structure in the current working d :: README.md - .travsis.yml + .travis.yml defaults/ main.yml files/ From bc7392009069749042bf937eb315ea19c513d0ff Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 18:28:57 -0500 Subject: [PATCH 261/590] Updated ansible-galaxy man page. Removed -b option for import. 
--- docs/man/man1/ansible-galaxy.1.asciidoc.in | 202 ++++++++++++++++++++- lib/ansible/cli/galaxy.py | 4 +- 2 files changed, 201 insertions(+), 5 deletions(-) diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index e6f2d0b456..44f0b46b08 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -12,7 +12,7 @@ ansible-galaxy - manage roles using galaxy.ansible.com SYNOPSIS -------- -ansible-galaxy [init|info|install|list|remove] [--help] [options] ... +ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ... DESCRIPTION @@ -20,7 +20,7 @@ DESCRIPTION *Ansible Galaxy* is a shared repository for Ansible roles. The ansible-galaxy command can be used to manage these roles, -or by creating a skeleton framework for roles you'd like to upload to Galaxy. +or for creating a skeleton framework for roles you'd like to upload to Galaxy. COMMON OPTIONS -------------- @@ -29,7 +29,6 @@ COMMON OPTIONS Show a help message related to the given sub-command. - INSTALL ------- @@ -145,6 +144,203 @@ The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) +SEARCH +------ + +The *search* sub-command returns a filtered list of roles found at +galaxy.ansible.com. + +USAGE +~~~~~ + +$ ansible-galaxy search [options] [searchterm1 searchterm2] + + +OPTIONS +~~~~~~~ +*--galaxy-tags*:: + +Provide a comma separated list of Galaxy Tags on which to filter. + +*--platforms*:: + +Provide a comma separated list of Platforms on which to filter. + +*--author*:: + +Specify the username of a Galaxy contributor on which to filter. + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +INFO +---- + +The *info* sub-command shows detailed information for a specific role. 
+Details returned about the role included information from the local copy +as well as information from galaxy.ansible.com. + +USAGE +~~~~~ + +$ ansible-galaxy info [options] role_name[, version] + +OPTIONS +~~~~~~~ + +*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: + +The path to the directory containing your roles. The default is the *roles_path* +configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +LOGIN +----- + +The *login* sub-command is used to authenticate with galaxy.ansible.com. +Authentication is required to use the import, delete and setup commands. +It will authenticate the user,retrieve a token from Galaxy, and store it +in the user's home directory. + +USAGE +~~~~~ + +$ ansible-galaxy login [options] + +The *login* sub-command prompts for a *GitHub* username and password. It does +NOT send your password to Galaxy. It actually authenticates with GitHub and +creates a personal access token. It then sends the personal access token to +Galaxy, which in turn verifies that you are you and returns a Galaxy access +token. After authentication completes the *GitHub* personal access token is +destroyed. + +If you do not wish to use your GitHub password, or if you have two-factor +authentication enabled with GitHub, use the *--github-token* option to pass a +personal access token that you create. Log into GitHub, go to Settings and +click on Personal Access Token to create a token. + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--github-token*:: + +Authenticate using a *GitHub* personal access token rather than a password. + + +IMPORT +------ + +Import a role from *GitHub* to galaxy.ansible.com. 
Requires the user first +authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy import [options] github_user github_repo + +OPTIONS +~~~~~~~ +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--branch*:: + +Provide a specific branch to import. When a branch is not specified the +branch found in meta/main.yml is used. If no branch is specified in +meta/main.yml, the repo's default branch (usually master) is used. + + +DELETE +------ + +The *delete* sub-command will delete a role from galaxy.ansible.com. Requires +the user first authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy delete [options] github_user github_repo + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +SETUP +----- + +The *setup* sub-command creates an integration point for *Travis CI*, enabling +galaxy.ansible.com to receive notifications from *Travis* on build completion. +Requires the user first authenticate with galaxy.ansible.com using the *login* +subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy setup [options] source github_user github_repo secret + +* Use *travis* as the source value. In the future additional source values may + be added. + +* Provide your *Travis* user token as the secret. The token is not stored by + galaxy.ansible.com. A hash is created using github_user, github_repo + and your token. The hash value is what actually gets stored. + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +--list:: + +Show your configured integrations. Provids the ID of each integration +which can be used with the remove option. + +--remove:: + +Remove a specific integration. 
Provide the ID of the integration to +be removed. + AUTHOR ------ diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 13df7c4122..1cd936d028 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -100,7 +100,7 @@ class GalaxyCLI(CLI): self.parser.set_usage("usage: %prog list [role_name]") elif self.action == "login": self.parser.set_usage("usage: %prog login [options]") - self.parser.add_option('-g','--github-token', dest='token', default=None, + self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.add_option('--platforms', dest='platforms', @@ -118,7 +118,7 @@ class GalaxyCLI(CLI): help='List all of your integrations.') # options that apply to more than one action - if not self.action in ("import","init","login","setup"): + if not self.action in ("delete","import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. ' 'The default is the roles_path configured in your ' From f1c72ff8f51b749165d5bc4089ca8c8fd5b22789 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 22:04:00 -0500 Subject: [PATCH 262/590] Make sure it is clear that new commands require using the Galaxy 2.0 Beta site. --- docsite/rst/galaxy.rst | 58 +++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index c9dea27336..3a12044ca9 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -1,7 +1,7 @@ Ansible Galaxy ++++++++++++++ -"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool that helps work with roles. 
+"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool for managing and creating roles. .. contents:: Topics @@ -10,24 +10,36 @@ The Website The website `Ansible Galaxy `_, is a free site for finding, downloading, and sharing community developed Ansible roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects. -You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. +Access the Galaxy web site using GitHub OAuth, and to install roles use the 'ansible-galaxy' command line tool included in Ansible 1.4.2 and later. Read the "About" page on the Galaxy site for more information. The ansible-galaxy command line tool ```````````````````````````````````` -The command line ansible-galaxy has many different subcommands. +The ansible-galaxy command has many different sub-commands for managing roles both locally and at `galaxy.ansible.com `_. + +.. note:: + + The search, login, import, delete, and setup commands in the Ansible 2.0 version of ansible-galaxy require access to the + 2.0 Beta release of the Galaxy web site available at `https://galaxy-qa.ansible.com `_. + + Use the ``--server`` option to access the beta site. For example:: + + $ ansible-galaxy search --server https://galaxy-qa.ansible.com mysql --author geerlingguy + + Additionally, you can define a server in ansible.cfg:: + + [galaxy] + server=https://galaxy-qa.ansible.com Installing Roles ---------------- -The most obvious is downloading roles from the Ansible Galaxy website:: +The most obvious use of the ansible-galaxy command is downloading roles from `the Ansible Galaxy website `_:: $ ansible-galaxy install username.rolename -.. _galaxy_cli_roles_path: - roles_path =============== @@ -169,7 +181,9 @@ The search command will return a list of the first 1000 results matching your se .. note:: - The format of results pictured here is new in Ansible 2.0. 
+ The search command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Get More Information About a Role --------------------------------- @@ -213,10 +227,6 @@ This returns everything found in Galaxy for the role: version: watchers_count: 1 -.. note:: - - The format of results pictured here is new in Ansible 2.0. - List Installed Roles -------------------- @@ -262,7 +272,13 @@ To use the import, delete and setup commands authentication with Galaxy is requi As depicted above, the login command prompts for a GitHub username and password. It does NOT send your password to Galaxy. It actually authenticates with GitHub and creates a personal access token. It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token. After authentication completes the GitHub personal access token is destroyed. -If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. +If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. + +.. note:: + + The login command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Import a Role ------------- @@ -298,7 +314,9 @@ If the --no-wait option is present, the command will not wait for results. Resul .. 
note:: - The import command is only available in Ansible 2.0. + The import command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Delete a Role ------------- @@ -307,13 +325,15 @@ Remove a role from the Galaxy web site using the delete command. You can delete :: - ansible-galaxy delete github_user github_repo + $ ansible-galaxy delete github_user github_repo This only removes the role from Galaxy. It does not impact the actual GitHub repo. .. note:: - The delete command is only available in Ansible 2.0. + The delete command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Setup Travis Integerations -------------------------- @@ -324,7 +344,7 @@ Using the setup command you can enable notifications from `travis `_. The calculated hash is stored in Galaxy and used to verify notifications received from Travis. @@ -339,7 +359,9 @@ When you create your .travis.yml file add the following to cause Travis to notif .. note:: - The setup command is only available in Ansible 2.0. + The setup command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. 
List Travis Integrtions @@ -361,7 +383,7 @@ Use the --list option to display your Travis integrations: Remove Travis Integrations ========================== -Use the --remove option to disable a Travis integration: +Use the --remove option to disable and remove a Travis integration: :: From 342dee0023e2c6fd6d361a70fec621c09b833915 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 22:56:54 -0500 Subject: [PATCH 263/590] Define and handle ignore_certs correctly. Preserve search term order. Tweak to Galaxy docsite. --- docsite/rst/galaxy.rst | 2 +- lib/ansible/cli/galaxy.py | 8 ++++---- lib/ansible/galaxy/api.py | 18 ++++++++---------- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 3a12044ca9..200fdfd575 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -41,7 +41,7 @@ The most obvious use of the ansible-galaxy command is downloading roles from `th $ ansible-galaxy install username.rolename roles_path -=============== +========== You can specify a particular directory where you want the downloaded roles to be placed:: diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 1cd936d028..a4a7b915f3 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -127,7 +127,7 @@ class GalaxyCLI(CLI): if self.action in ("import","info","init","install","login","search","setup","delete"): self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') - self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True, + self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False, help='Ignore SSL certificate validation errors.') if self.action in ("init","install"): @@ -505,7 +505,7 @@ class GalaxyCLI(CLI): terms = [] for i in range(len(self.args)): terms.append(self.args.pop()) - search = '+'.join(terms) + 
search = '+'.join(terms[::-1]) if not search and not self.options.platforms and not self.options.tags and not self.options.author: raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") @@ -520,9 +520,9 @@ class GalaxyCLI(CLI): data = '' if response['count'] > page_size: - data += ("Found %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) + data += ("\nFound %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) else: - data += ("Found %d roles matching your search:\n" % response['count']) + data += ("\nFound %d roles matching your search:\n" % response['count']) max_len = [] for role in response['results']: diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index c1bf2c4ed5..eec9ee932e 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -48,16 +48,15 @@ class GalaxyAPI(object): SUPPORTED_VERSIONS = ['v1'] def __init__(self, galaxy): - self.galaxy = galaxy self.token = GalaxyToken() self._api_server = C.GALAXY_SERVER - self._validate_certs = C.GALAXY_IGNORE_CERTS + self._validate_certs = not C.GALAXY_IGNORE_CERTS # set validate_certs - if galaxy.options.validate_certs == False: + if galaxy.options.ignore_certs: self._validate_certs = False - display.vvv('Check for valid certs: %s' % self._validate_certs) + display.vvv('Validate TLS certificates: %s' % self._validate_certs) # set the API server if galaxy.options.api_server != C.GALAXY_SERVER: @@ -65,14 +64,13 @@ class GalaxyAPI(object): display.vvv("Connecting to galaxy_server: %s" % self._api_server) server_version = self.get_server_api_version() - - if server_version in self.SUPPORTED_VERSIONS: - self.baseurl = '%s/api/%s' % (self._api_server, server_version) - self.version = server_version # for future use - display.vvv("Base API: %s" % self.baseurl) - else: + if not server_version in self.SUPPORTED_VERSIONS: raise AnsibleError("Unsupported Galaxy server 
API version: %s" % server_version) + self.baseurl = '%s/api/%s' % (self._api_server, server_version) + self.version = server_version # for future use + display.vvv("Base API: %s" % self.baseurl) + def __auth_header(self): token = self.token.get() if token is None: From 847f454bccb6ec3942ff5d652db7dd1db4d77159 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 23:25:23 -0500 Subject: [PATCH 264/590] Add a section to intro_configuration for Galaxy. --- docsite/rst/intro_configuration.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index dda07fc450..0ad54938d0 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -897,3 +897,19 @@ The normal behaviour is for operations to copy the existing context or use the u The default list is: nfs,vboxsf,fuse,ramfs:: special_context_filesystems = nfs,vboxsf,fuse,ramfs,myspecialfs + +Galaxy Settings +--------------- + +The following options can be set in the [galaxy] section of ansible.cfg: + +server +====== + +Override the default Galaxy server value of https://galaxy.ansible.com. + +ignore_certs +============ + +If set to *yes*, ansible-galaxy will not validate TLS certificates. Handy for testing against a server with a self-signed certificate +. \ No newline at end of file From 06dde0d332d88e958ac5489bea88f0f5bc536e1b Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Thu, 10 Dec 2015 10:57:48 -0500 Subject: [PATCH 265/590] Fixed documentation typos and bits that needed clarification. Fixed missing spaces in VALID_ACTIONS. 
--- docs/man/man1/ansible-galaxy.1.asciidoc.in | 19 ++++++++++--------- docsite/rst/galaxy.rst | 4 ++-- docsite/rst/intro_configuration.rst | 4 ++-- lib/ansible/cli/galaxy.py | 2 +- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index 44f0b46b08..9ffe65e45a 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -147,8 +147,9 @@ configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) SEARCH ------ -The *search* sub-command returns a filtered list of roles found at -galaxy.ansible.com. +The *search* sub-command returns a filtered list of roles found on the remote +server. + USAGE ~~~~~ @@ -170,7 +171,7 @@ Provide a comma separated list of Platforms on which to filter. Specify the username of a Galaxy contributor on which to filter. -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -199,7 +200,7 @@ OPTIONS The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -213,7 +214,7 @@ LOGIN The *login* sub-command is used to authenticate with galaxy.ansible.com. Authentication is required to use the import, delete and setup commands. -It will authenticate the user,retrieve a token from Galaxy, and store it +It will authenticate the user, retrieve a token from Galaxy, and store it in the user's home directory. USAGE @@ -236,7 +237,7 @@ click on Personal Access Token to create a token. OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -262,7 +263,7 @@ $ ansible-galaxy import [options] github_user github_repo OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. 
@@ -291,7 +292,7 @@ $ ansible-galaxy delete [options] github_user github_repo OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -323,7 +324,7 @@ $ ansible-galaxy setup [options] source github_user github_repo secret OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 200fdfd575..f8cde57e62 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -364,8 +364,8 @@ When you create your .travis.yml file add the following to cause Travis to notif section of your ansible.cfg file. -List Travis Integrtions -======================= +List Travis Integrations +======================== Use the --list option to display your Travis integrations: diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 0ad54938d0..ccfb456ed9 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -906,10 +906,10 @@ The following options can be set in the [galaxy] section of ansible.cfg: server ====== -Override the default Galaxy server value of https://galaxy.ansible.com. +Override the default Galaxy server value of https://galaxy.ansible.com. Useful if you have a hosted version of the Galaxy web app or want to point to the testing site https://galaxy-qa.ansible.com. It does not work against private, hosted repos, which Galaxy can use for fetching and installing roles. ignore_certs ============ If set to *yes*, ansible-galaxy will not validate TLS certificates. Handy for testing against a server with a self-signed certificate -. \ No newline at end of file +. 
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index a4a7b915f3..34afa03c9f 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -49,7 +49,7 @@ except ImportError: class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - VALID_ACTIONS = ("delete","import","info","init","install","list","login","remove","search","setup") + VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") def __init__(self, args): self.api = None From 95785f149d21badaf7cba35b4ffa7ed5805235d4 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Thu, 10 Dec 2015 21:44:03 -0500 Subject: [PATCH 266/590] Fix docs. The search command works with both galaxy.ansible.com and galaxy-qa.ansible.com. --- docsite/rst/galaxy.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index f8cde57e62..6d64a542b4 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -181,9 +181,7 @@ The search command will return a list of the first 1000 results matching your se .. note:: - The search command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access - `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] - section of your ansible.cfg file. + The format of results pictured here is new in Ansible 2.0. 
Get More Information About a Role --------------------------------- From 2bc3683d41b307611a03447e9d4b194ba6ef5c1c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 13 Dec 2015 05:54:57 -0800 Subject: [PATCH 267/590] Restore comment about for-else since it is an uncommon idiom --- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 154404e474..254bab476b 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -119,7 +119,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type) if module_path: break - else: + else: # This is a for-else: http://bit.ly/1ElPkyg # Use Windows version of ping module to check module paths when # using a connection that supports .ps1 suffixes. We check specifically # for win_ping here, otherwise the code would look for ping.ps1 From 0c954bd14298a81be4c9026563326a87f9c42f58 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Sun, 13 Dec 2015 18:00:54 +0100 Subject: [PATCH 268/590] add --full flag to ansible-pull man page add --full flag that was added in #13502 --- docs/man/man1/ansible-pull.1.asciidoc.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in index 333b8e34e0..0afba2aeaa 100644 --- a/docs/man/man1/ansible-pull.1.asciidoc.in +++ b/docs/man/man1/ansible-pull.1.asciidoc.in @@ -95,6 +95,10 @@ Force running of playbook even if unable to update playbook repository. This can be useful, for example, to enforce run-time state when a network connection may not always be up or possible. +*--full*:: + +Do a full clone of the repository. By default ansible-pull will do a shallow clone based on the last revision. + *-h*, *--help*:: Show the help message and exit. 
From 89603a0509117610e2cbebc6c48475a3b8af98b2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 13 Dec 2015 12:18:28 -0500 Subject: [PATCH 269/590] added that ansible-pull is now shallow to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bf11e6c5b..c6319634fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -350,6 +350,7 @@ newline being stripped you can change your playbook like this: * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. * environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. +* ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. * Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. 
Use the default filter if you want to approximate the old behaviour: ``` From f8ff63f8c8ab001ea8f096968b550f23262c193c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 03:06:52 -0500 Subject: [PATCH 270/590] A few tweaks to improve new forking code --- lib/ansible/plugins/strategy/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index ea30b800b0..4047bde73a 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -149,17 +149,20 @@ class StrategyBase: # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() + queued = False while True: (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] if worker_prc is None or not worker_prc.is_alive(): worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) self._workers[self._cur_worker][0] = worker_prc worker_prc.start() - break + queued = True self._cur_worker += 1 if self._cur_worker >= len(self._workers): self._cur_worker = 0 time.sleep(0.0001) + if queued: + break del task_vars self._pending_results += 1 @@ -196,7 +199,7 @@ class StrategyBase: else: iterator.mark_host_failed(host) (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True) - if state.run_state != PlayIterator.ITERATING_RESCUE: + if not state or state.run_state != PlayIterator.ITERATING_RESCUE: self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: From 279c5a359631d296e1a91c1520417e68750138bb Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 03:07:20 -0500 Subject: [PATCH 271/590] Cleanup strategy tests broken by new forking strategy --- .../plugins/strategies/test_strategy_base.py | 125 +++++++++++------- 1 file changed, 75 insertions(+), 50 deletions(-) diff --git 
a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index bf01cf6fcc..7cc81a0324 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -24,8 +24,11 @@ from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError from ansible.plugins.strategy import StrategyBase +from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_queue_manager import TaskQueueManager from ansible.executor.task_result import TaskResult +from ansible.playbook.handler import Handler +from ansible.inventory.host import Host from six.moves import queue as Queue from units.mock.loader import DictDataLoader @@ -98,37 +101,44 @@ class TestStrategyBase(unittest.TestCase): mock_tqm._unreachable_hosts = ["host02"] self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:]) - def test_strategy_base_queue_task(self): + @patch.object(WorkerProcess, 'run') + def test_strategy_base_queue_task(self, mock_worker): + def fake_run(self): + return + + mock_worker.run.side_effect = fake_run + fake_loader = DictDataLoader() + mock_var_manager = MagicMock() + mock_host = MagicMock() + mock_inventory = MagicMock() + mock_options = MagicMock() + mock_options.module_path = None + + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_manager, + loader=fake_loader, + options=mock_options, + passwords=None, + ) + tqm._initialize_processes(3) + tqm.hostvars = dict() - workers = [] - for i in range(0, 3): - worker_main_q = MagicMock() - worker_main_q.put.return_value = None - worker_result_q = MagicMock() - workers.append([i, worker_main_q, worker_result_q]) - - mock_tqm = MagicMock() - mock_tqm._final_q = MagicMock() - mock_tqm.get_workers.return_value = workers - mock_tqm.get_loader.return_value = fake_loader - - strategy_base = StrategyBase(tqm=mock_tqm) - 
strategy_base._cur_worker = 0 - strategy_base._pending_results = 0 - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 1) - self.assertEqual(strategy_base._pending_results, 1) - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 2) - self.assertEqual(strategy_base._pending_results, 2) - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 0) - self.assertEqual(strategy_base._pending_results, 3) - workers[0][1].put.side_effect = EOFError - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 1) - self.assertEqual(strategy_base._pending_results, 3) + try: + strategy_base = StrategyBase(tqm=tqm) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 1) + self.assertEqual(strategy_base._pending_results, 1) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 2) + self.assertEqual(strategy_base._pending_results, 2) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 0) + self.assertEqual(strategy_base._pending_results, 3) + finally: + tqm.cleanup() + def test_strategy_base_process_pending_results(self): mock_tqm = MagicMock() @@ -156,6 +166,7 @@ class TestStrategyBase(unittest.TestCase): mock_iterator = MagicMock() mock_iterator.mark_host_failed.return_value = None + mock_iterator.get_next_task_for_host.return_value = (None, None) mock_host = MagicMock() mock_host.name = 'test01' @@ 
-315,22 +326,15 @@ class TestStrategyBase(unittest.TestCase): res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) self.assertEqual(res, []) - def test_strategy_base_run_handlers(self): - workers = [] - for i in range(0, 3): - worker_main_q = MagicMock() - worker_main_q.put.return_value = None - worker_result_q = MagicMock() - workers.append([i, worker_main_q, worker_result_q]) - - mock_tqm = MagicMock() - mock_tqm._final_q = MagicMock() - mock_tqm.get_workers.return_value = workers - mock_tqm.send_callback.return_value = None - + @patch.object(WorkerProcess, 'run') + def test_strategy_base_run_handlers(self, mock_worker): + def fake_run(*args): + return + mock_worker.side_effect = fake_run mock_play_context = MagicMock() - mock_handler_task = MagicMock() + mock_handler_task = MagicMock(Handler) + mock_handler_task.action = 'foo' mock_handler_task.get_name.return_value = "test handler" mock_handler_task.has_triggered.return_value = False @@ -341,11 +345,9 @@ class TestStrategyBase(unittest.TestCase): mock_play = MagicMock() mock_play.handlers = [mock_handler] - mock_host = MagicMock() + mock_host = MagicMock(Host) mock_host.name = "test01" - mock_iterator = MagicMock() - mock_inventory = MagicMock() mock_inventory.get_hosts.return_value = [mock_host] @@ -355,8 +357,31 @@ class TestStrategyBase(unittest.TestCase): mock_iterator = MagicMock mock_iterator._play = mock_play - strategy_base = StrategyBase(tqm=mock_tqm) - strategy_base._inventory = mock_inventory - strategy_base._notified_handlers = {"test handler": [mock_host]} + fake_loader = DictDataLoader() + mock_options = MagicMock() + mock_options.module_path = None - result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_mgr, + loader=fake_loader, + options=mock_options, + passwords=None, + ) + tqm._initialize_processes(3) + tqm.hostvars = dict() + + try: + 
strategy_base = StrategyBase(tqm=tqm) + + strategy_base._inventory = mock_inventory + strategy_base._notified_handlers = {"test handler": [mock_host]} + + mock_return_task = MagicMock(Handler) + mock_return_host = MagicMock(Host) + task_result = TaskResult(mock_return_host, mock_return_task, dict(changed=False)) + tqm._final_q.put(('host_task_ok', task_result)) + + result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) + finally: + tqm.cleanup() From f5f9b2fd354fe013e68f589279cc349a42a461fb Mon Sep 17 00:00:00 2001 From: Hans-Joachim Kliemeck Date: Mon, 14 Dec 2015 14:36:35 +0100 Subject: [PATCH 272/590] use default settings from ansible.cfg --- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 012872be7c..48e0134672 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -246,7 +246,7 @@ class CLI(object): help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) if vault_opts: - parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', + parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file', help="vault password file", action="callback", callback=CLI.expand_tilde, type=str) From 1f8e484b70f90d34d127eda9cf10a619bb0e72e8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 3 Dec 2015 07:07:13 -0800 Subject: [PATCH 273/590] Fix the refresh flag in openstack inventory Refresh will update the dogpile cache from shade, but doesn't cause the ansible side json cache to be invalidated. It's a simple oversight. 
--- contrib/inventory/openstack.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index 46b43e9221..231488b06d 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -94,9 +94,9 @@ def get_groups_from_server(server_vars): return groups -def get_host_groups(inventory): +def get_host_groups(inventory, refresh=False): (cache_file, cache_expiration_time) = get_cache_settings() - if is_cache_stale(cache_file, cache_expiration_time): + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): groups = to_json(get_host_groups_from_cloud(inventory)) open(cache_file, 'w').write(groups) else: @@ -121,8 +121,10 @@ def get_host_groups_from_cloud(inventory): return groups -def is_cache_stale(cache_file, cache_expiration_time): +def is_cache_stale(cache_file, cache_expiration_time, refresh=False): ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True if os.path.isfile(cache_file): mod_time = os.path.getmtime(cache_file) current_time = time.time() @@ -176,7 +178,7 @@ def main(): ) if args.list: - output = get_host_groups(inventory) + output = get_host_groups(inventory, refresh=args.refresh) elif args.host: output = to_json(inventory.get_host(args.host)) print(output) From 49dc9eea169efb329d7d184df53ce3dea4dface1 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Wed, 9 Dec 2015 15:11:21 -0500 Subject: [PATCH 274/590] add tests for encrypted hash mysql_user --- .../tasks/user_password_update_test.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index 50307cef95..9a899b206c 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ 
b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -79,8 +79,23 @@ - include: remove_user.yml user_name={{user_name_2}} user_password={{ user_password_1 }} +- name: Create user with password1234 using hash. (expect changed=true) + mysql_user: name=jmainguy password='*D65798AAC0E5C6DF3F320F8A30E026E7EBD73A95' encrypted=yes + register: encrypt_result +- name: Check that the module made a change + assert: + that: + - "encrypt_result.changed == True" +- name: See if the password needs to be updated. (expect changed=false) + mysql_user: name=jmainguy password='password1234' + register: plain_result +- name: Check that the module did not change the password + assert: + that: + - "plain_result.changed == False" - +- name: Remove user (cleanup) + mysql_user: name=jmainguy state=absent From 9f61144401a16c9d610193522c71e8852addf63e Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 3 Dec 2015 07:04:24 -0800 Subject: [PATCH 275/590] Optionally only use UUIDs for openstack hosts on duplicates The OpenStack inventory lists hostnames as the UUIDs because hostnames are not guaranteed to be unique on OpenStack. However, for the common case, this is just confusing. The new behavior is a visible change, so make it an opt-in via config. Only turn the hostnames to UUIDs if there are duplicate hostnames. --- contrib/inventory/openstack.py | 57 +++++++++++++++++++++++++++------ contrib/inventory/openstack.yml | 3 ++ 2 files changed, 50 insertions(+), 10 deletions(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index 231488b06d..b82a042c29 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -32,6 +32,13 @@ # all of them and present them as one contiguous inventory. # # See the adjacent openstack.yml file for an example config file +# There are two ansible inventory specific options that can be set in +# the inventory section. 
+# expand_hostvars controls whether or not the inventory will make extra API +# calls to fill out additional information about each server +# use_hostnames changes the behavior from registering every host with its UUID +# and making a group of its hostname to only doing this if the +# hostname in question has more than one server import argparse import collections @@ -51,7 +58,7 @@ import shade.inventory CONFIG_FILES = ['/etc/ansible/openstack.yaml'] -def get_groups_from_server(server_vars): +def get_groups_from_server(server_vars, namegroup=True): groups = [] region = server_vars['region'] @@ -76,7 +83,8 @@ def get_groups_from_server(server_vars): groups.append(extra_group) groups.append('instance-%s' % server_vars['id']) - groups.append(server_vars['name']) + if namegroup: + groups.append(server_vars['name']) for key in ('flavor', 'image'): if 'name' in server_vars[key]: @@ -106,17 +114,36 @@ def get_host_groups(inventory, refresh=False): def get_host_groups_from_cloud(inventory): groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) hostvars = {} - for server in inventory.list_hosts(): + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): if 'interface_ip' not in server: continue - for group in get_groups_from_server(server): - groups[group].append(server['id']) - hostvars[server['id']] = dict( - ansible_ssh_host=server['interface_ip'], - openstack=server, - ) + firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + server = servers[0] + hostvars[name] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=False): + groups[group].append(server['name']) + else: + for server in 
servers: + server_id = server['id'] + hostvars[server_id] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=True): + groups[group].append(server_id) groups['_meta'] = {'hostvars': hostvars} return groups @@ -171,11 +198,21 @@ def main(): try: config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES shade.simple_logging(debug=args.debug) - inventory = shade.inventory.OpenStackInventory( + inventory_args = dict( refresh=args.refresh, config_files=config_files, private=args.private, ) + if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + } + )) + + inventory = shade.inventory.OpenStackInventory(**inventory_args) if args.list: output = get_host_groups(inventory, refresh=args.refresh) diff --git a/contrib/inventory/openstack.yml b/contrib/inventory/openstack.yml index a99bb02058..1520e2937e 100644 --- a/contrib/inventory/openstack.yml +++ b/contrib/inventory/openstack.yml @@ -26,3 +26,6 @@ clouds: username: stack password: stack project_name: stack +ansible: + use_hostnames: True + expand_hostvars: False From 6312e38133e79674910b2cb8c1b1aa695c6816fc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 10:35:38 -0500 Subject: [PATCH 276/590] Fixing up some non-py3 things for unit tests --- lib/ansible/executor/task_queue_manager.py | 2 +- lib/ansible/module_utils/known_hosts.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 9189ab9581..dae70a1292 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -99,7 +99,7 @@ class TaskQueueManager: def _initialize_processes(self, num): self._workers = [] - for i in xrange(num): + for i in range(num): main_q = 
multiprocessing.Queue() rslt_q = multiprocessing.Queue() self._workers.append([None, main_q, rslt_q]) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index d2644d9766..2824836650 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -169,7 +169,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False): if not os.path.exists(user_ssh_dir): if create_dir: try: - os.makedirs(user_ssh_dir, 0700) + os.makedirs(user_ssh_dir, 0o700) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: From 80d23d639c2351ab6d0951763ca101516f0f2eb7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 10:43:30 -0500 Subject: [PATCH 277/590] Use an octal representation that works from 2.4->3+ for known_hosts --- lib/ansible/module_utils/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 2824836650..9b6af2a28e 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -169,7 +169,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False): if not os.path.exists(user_ssh_dir): if create_dir: try: - os.makedirs(user_ssh_dir, 0o700) + os.makedirs(user_ssh_dir, int('700', 8)) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: From c9eb41109f83358d8d968457728996f60b30b933 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 08:03:56 -0800 Subject: [PATCH 278/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0d23b3df52..e6b7b17326 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 
0d23b3df526875c8fc6edf94268f3aa850ec05f1 +Subproject commit e6b7b17326b4c9d11501112270c52ae25955938a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 51813e0033..f3251de29c 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 51813e003331c3341b07c5cda33346cada537a3b +Subproject commit f3251de29cb10664b2c63a0021530c3fe34111a3 From 457f86f61a3bef95b562dbf91b523c563bff2f63 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 08:50:37 -0800 Subject: [PATCH 279/590] Minor: Correct typo pyhton => python --- test/integration/roles/test_docker/tasks/main.yml | 2 +- test/units/plugins/cache/test_cache.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml index 2ea15644d5..76b3fa7070 100644 --- a/test/integration/roles/test_docker/tasks/main.yml +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -3,7 +3,7 @@ #- include: docker-setup-rht.yml # Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when # they've got that sorted out - # CentOS 6 currently broken by conflicting files in pyhton-backports and python-backports-ssl_match_hostname + # CentOS 6 currently broken by conflicting files in python-backports and python-backports-ssl_match_hostname #when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 # python-docker isn't available until 14.10. 
Revist at the next Ubuntu LTS diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py index af1d924910..0547ba55bf 100644 --- a/test/units/plugins/cache/test_cache.py +++ b/test/units/plugins/cache/test_cache.py @@ -110,6 +110,6 @@ class TestAbstractClass(unittest.TestCase): def test_memory_cachemodule(self): self.assertIsInstance(MemoryCache(), MemoryCache) - @unittest.skipUnless(HAVE_REDIS, 'Redis pyhton module not installed') + @unittest.skipUnless(HAVE_REDIS, 'Redis python module not installed') def test_redis_cachemodule(self): self.assertIsInstance(RedisCache(), RedisCache) From e595c501976d5f378414dec90543151d7319253b Mon Sep 17 00:00:00 2001 From: gp Date: Mon, 14 Dec 2015 12:06:35 -0500 Subject: [PATCH 280/590] Fix typo in galaxy.rst Fix typo --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 783ac15e45..c9dea27336 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -126,7 +126,7 @@ The above will create the following directory structure in the current working d :: README.md - .travsis.yml + .travis.yml defaults/ main.yml files/ From a7ac98262d94cc24a584b8e163cebc0a2a492cd6 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 12 Dec 2015 20:18:36 +0100 Subject: [PATCH 281/590] Make module_utils.known_hosts.get_fqdn work on ipv6 --- lib/ansible/module_utils/known_hosts.py | 16 +++++++++------- .../units/module_utils/basic/test_known_hosts.py | 8 ++++++++ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 9b6af2a28e..64ad0c76c2 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -74,12 +74,12 @@ def get_fqdn(repo_url): if "@" in repo_url and "://" not in repo_url: # most likely an user@host:path or user@host/path type URL repo_url = repo_url.split("@", 1)[1] - 
if ":" in repo_url: - repo_url = repo_url.split(":")[0] - result = repo_url + if repo_url.startswith('['): + result = repo_url.split(']', 1)[0] + ']' + elif ":" in repo_url: + result = repo_url.split(":")[0] elif "/" in repo_url: - repo_url = repo_url.split("/")[0] - result = repo_url + result = repo_url.split("/")[0] elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse.urlparse(repo_url) @@ -87,11 +87,13 @@ def get_fqdn(repo_url): # ensure we actually have a parts[1] before continuing. if parts[1] != '': result = parts[1] - if ":" in result: - result = result.split(":")[0] if "@" in result: result = result.split("@", 1)[1] + if result[0].startswith('['): + result = result.split(']', 1)[0] + ']' + elif ":" in result: + result = result.split(":")[0] return result def check_hostkey(module, fqdn): diff --git a/test/units/module_utils/basic/test_known_hosts.py b/test/units/module_utils/basic/test_known_hosts.py index 952184bfec..515d67686d 100644 --- a/test/units/module_utils/basic/test_known_hosts.py +++ b/test/units/module_utils/basic/test_known_hosts.py @@ -33,6 +33,14 @@ class TestAnsibleModuleKnownHosts(unittest.TestCase): {'is_ssh_url': True, 'get_fqdn': 'five.example.org'}, 'ssh://six.example.org:21/example.org': {'is_ssh_url': True, 'get_fqdn': 'six.example.org'}, + 'ssh://[2001:DB8::abcd:abcd]/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'ssh://[2001:DB8::abcd:abcd]:22/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'username@[2001:DB8::abcd:abcd]/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'username@[2001:DB8::abcd:abcd]:22/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, } def test_is_ssh_url(self): From 8d16638fec3e88e0f7b0dde24aae095100436644 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 10:54:10 -0800 Subject: [PATCH 282/590] Fix for template module not creating a file 
that was not present when force=false --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 109f3e80c0..d134f80a8d 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -150,7 +150,7 @@ class ActionModule(ActionBase): diff = {} new_module_args = self._task.args.copy() - if force and local_checksum != remote_checksum: + if (remote_checksum == '1') or (force and local_checksum != remote_checksum): result['changed'] = True # if showing diffs, we need to get the remote value From 27cd7668c152c5b2b74a10ffe78bfca7a11aeaac Mon Sep 17 00:00:00 2001 From: Peter Sprygada Date: Tue, 8 Dec 2015 07:34:09 -0500 Subject: [PATCH 283/590] the ssh shared module will try to use keys if the password is not supplied The current ssh shared module forces only password based authentication. This change will allow the ssh module to use keys if a password is not provided. 
--- lib/ansible/module_utils/ssh.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/ssh.py b/lib/ansible/module_utils/ssh.py index 343f017a98..00922ef8cd 100644 --- a/lib/ansible/module_utils/ssh.py +++ b/lib/ansible/module_utils/ssh.py @@ -91,12 +91,17 @@ class Ssh(object): def __init__(self): self.client = None - def open(self, host, port=22, username=None, password=None, timeout=10): + def open(self, host, port=22, username=None, password=None, + timeout=10, key_filename=None): + ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + use_keys = password is None + ssh.connect(host, port=port, username=username, password=password, - timeout=timeout, allow_agent=False, look_for_keys=False) + timeout=timeout, allow_agent=use_keys, look_for_keys=use_keys, + key_filename=key_filename) self.client = ssh return self.on_open() From be4d1f9ee380705768574baefb75830e3c76afa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Tue, 15 Dec 2015 12:49:20 +0100 Subject: [PATCH 284/590] Fix a part of python 3 tests (make tests-py3, see https://github.com/ansible/ansible/issues/13553 for more details). 
--- lib/ansible/module_utils/known_hosts.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 64ad0c76c2..52b0bb74b0 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -28,7 +28,11 @@ import os import hmac -import urlparse + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse try: from hashlib import sha1 From a0842781a6a77a0e51ad411ab186395379cc4dcb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 15 Dec 2015 08:44:43 -0500 Subject: [PATCH 285/590] renamed ssh.py shared module file to clarify --- lib/ansible/module_utils/{ssh.py => issh.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename lib/ansible/module_utils/{ssh.py => issh.py} (100%) diff --git a/lib/ansible/module_utils/ssh.py b/lib/ansible/module_utils/issh.py similarity index 100% rename from lib/ansible/module_utils/ssh.py rename to lib/ansible/module_utils/issh.py From be5488cb60869c67b0ea521a4044062157817e50 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 15 Dec 2015 09:27:53 -0500 Subject: [PATCH 286/590] clean debug output to match prev versions --- lib/ansible/plugins/callback/__init__.py | 6 ++++++ lib/ansible/plugins/callback/default.py | 1 + lib/ansible/plugins/callback/minimal.py | 1 + 3 files changed, 8 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index b8a48943f2..7371fe0a51 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -140,6 +140,12 @@ class CallbackBase: else: self.v2_playbook_item_on_ok(newres) + def _clean_results(self, result, task_name): + if 'changed' in result and task_name in ['debug']: + del result['changed'] + if 'invocation' in result and task_name in ['debug']: + del result['invocation'] + def set_play_context(self, play_context): pass diff --git 
a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 1f37f4b975..e515945bba 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -62,6 +62,7 @@ class CallbackModule(CallbackBase): def v2_runner_on_ok(self, result): + self._clean_results(result._result, result._task.action) delegated_vars = result._result.get('_ansible_delegated_vars', None) if result._task.action == 'include': return diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index f855c1a6e5..71f9f5dfee 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -64,6 +64,7 @@ class CallbackModule(CallbackBase): self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red') def v2_runner_on_ok(self, result): + self._clean_results(result._result, result._task.action) if result._task.action in C.MODULE_NO_JSON: self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green') else: From fcc9258b743d2f596628f28dd4cdc01f0f8d306e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 01:48:22 -0500 Subject: [PATCH 287/590] Use the original host rather than the serialized one when processing results Fixes #13526 Fixes #13564 Fixes #13566 --- lib/ansible/plugins/strategy/__init__.py | 25 +++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 4047bde73a..d2d79d036b 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -185,10 +185,20 @@ class StrategyBase: result = self._final_q.get() display.debug("got result from result worker: %s" % ([text_type(x) for x in result],)) + # helper method, used to find the original host from the one + # returned 
in the result/message, which has been serialized and + # thus had some information stripped from it to speed up the + # serialization process + def get_original_host(host): + if host.name in self._inventory._hosts_cache: + return self._inventory._hosts_cache[host.name] + else: + return self._inventory.get_host(host.name) + # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): task_result = result[1] - host = task_result._host + host = get_original_host(task_result._host) task = task_result._task if result[0] == 'host_task_failed' or task_result.is_failed(): if not task.ignore_errors: @@ -244,7 +254,7 @@ class StrategyBase: self._add_host(new_host_info, iterator) elif result[0] == 'add_group': - host = result[1] + host = get_original_host(result[1]) result_item = result[2] self._add_group(host, result_item) @@ -252,19 +262,20 @@ class StrategyBase: task_result = result[1] handler_name = result[2] - original_task = iterator.get_original_task(task_result._host, task_result._task) + original_host = get_original_host(task_result._host) + original_task = iterator.get_original_task(original_host, task_result._task) if handler_name not in self._notified_handlers: self._notified_handlers[handler_name] = [] - if task_result._host not in self._notified_handlers[handler_name]: - self._notified_handlers[handler_name].append(task_result._host) + if original_host not in self._notified_handlers[handler_name]: + self._notified_handlers[handler_name].append(original_host) display.vv("NOTIFIED HANDLER %s" % (handler_name,)) elif result[0] == 'register_host_var': # essentially the same as 'set_host_var' below, however we # never follow the delegate_to value for registered vars and # the variable goes in the fact_cache - host = result[1] + host = get_original_host(result[1]) task = result[2] var_value = wrap_var(result[3]) var_name = task.register @@ -278,7 +289,7 @@ class 
StrategyBase: self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value}) elif result[0] in ('set_host_var', 'set_host_facts'): - host = result[1] + host = get_original_host(result[1]) task = result[2] item = result[3] From 9942d71d345cf221dbcdb19f362d80430d995905 Mon Sep 17 00:00:00 2001 From: Matt Clay Date: Wed, 16 Dec 2015 01:37:02 -0800 Subject: [PATCH 288/590] Test for filename option in apt_repository module. --- .../roles/test_apt_repository/tasks/apt.yml | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/test/integration/roles/test_apt_repository/tasks/apt.yml b/test/integration/roles/test_apt_repository/tasks/apt.yml index 49d13bc52a..9c8e3ab447 100644 --- a/test/integration/roles/test_apt_repository/tasks/apt.yml +++ b/test/integration/roles/test_apt_repository/tasks/apt.yml @@ -2,6 +2,7 @@ - set_fact: test_ppa_name: 'ppa:menulibre-dev/devel' + test_ppa_filename: 'menulibre-dev' test_ppa_spec: 'deb http://ppa.launchpad.net/menulibre-dev/devel/ubuntu {{ansible_distribution_release}} main' test_ppa_key: 'A7AD98A1' # http://keyserver.ubuntu.com:11371/pks/lookup?search=0xD06AAF4C11DAB86DF421421EFE6B20ECA7AD98A1&op=index @@ -144,6 +145,47 @@ - name: 'ensure ppa key is absent (expect: pass)' apt_key: id='{{test_ppa_key}}' state=absent +# +# TEST: apt_repository: repo= filename= +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name= filename= (expect: pass)' + apt_repository: repo='{{test_ppa_spec}}' filename='{{test_ppa_filename}}' state=present + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_spec}}"' + +- name: 'examine source file' + stat: path='/etc/apt/sources.list.d/{{test_ppa_filename}}.list' + register: source_file + +- name: 'assert source file exists' + assert: + that: + - 'source_file.stat.exists == True' + +- name: 'examine apt cache 
mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +# When installing a repo with the spec, the key is *NOT* added +- name: 'ensure ppa key is absent (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=absent + # # TEARDOWN # From 63b624707d0bcb057cec7c81d86b511106cba512 Mon Sep 17 00:00:00 2001 From: David Date: Wed, 16 Dec 2015 23:46:06 +0800 Subject: [PATCH 289/590] Fix typo --- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 516403ac80..c6c01db5d4 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -132,7 +132,7 @@ Note that you cannot do variable substitution when including one playbook inside another. .. note:: - You can not conditionally path the location to an include file, + You can not conditionally pass the location to an include file, like you can with 'vars_files'. If you find yourself needing to do this, consider how you can restructure your playbook to be more class/role oriented. 
This is to say you cannot use a 'fact' to From 73ead4fbbadb8ad874f95f0dd542256b2ad730aa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 20:05:55 -0800 Subject: [PATCH 290/590] First attempt to fix https certificate errors through a proxy with python-2.7.9+ Fixes #12549 --- lib/ansible/module_utils/urls.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 979d5943dd..0f45c36034 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -326,11 +326,15 @@ class CustomHTTPSConnection(httplib.HTTPSConnection): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) + + server_hostname = self.host if self._tunnel_host: self.sock = sock self._tunnel() + server_hostname = self._tunnel_host + if HAS_SSLCONTEXT: - self.sock = self.context.wrap_socket(sock, server_hostname=self.host) + self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) else: self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) @@ -542,7 +546,7 @@ class SSLValidationHandler(urllib2.BaseHandler): connect_result = s.recv(4096) self.validate_proxy_response(connect_result) if context: - ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname')) + ssl_s = context.wrap_socket(s, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) From 72a0654b81aec47e9fa989ba8c1d50a55a093f6f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 15 Dec 2015 15:35:13 -0800 Subject: [PATCH 291/590] Fixes for proxy on RHEL5 --- lib/ansible/module_utils/urls.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git 
a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 0f45c36034..d0ee260e17 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -328,6 +328,8 @@ class CustomHTTPSConnection(httplib.HTTPSConnection): sock = socket.create_connection((self.host, self.port), self.timeout) server_hostname = self.host + # Note: self._tunnel_host is not available on py < 2.6 but this code + # isn't used on py < 2.6 (lack of create_connection) if self._tunnel_host: self.sock = sock self._tunnel() @@ -377,7 +379,10 @@ def generic_urlparse(parts): # get the username, password, etc. try: netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$') - (auth, hostname, port) = netloc_re.match(parts[1]) + match = netloc_re.match(parts[1]) + auth = match.group(1) + hostname = match.group(2) + port = match.group(3) if port: # the capture group for the port will include the ':', # so remove it and convert the port to an integer @@ -387,6 +392,8 @@ def generic_urlparse(parts): # and then split it up based on the first ':' found auth = auth[:-1] username, password = auth.split(':', 1) + else: + username = password = None generic_parts['username'] = username generic_parts['password'] = password generic_parts['hostname'] = hostname @@ -394,7 +401,7 @@ def generic_urlparse(parts): except: generic_parts['username'] = None generic_parts['password'] = None - generic_parts['hostname'] = None + generic_parts['hostname'] = parts[1] generic_parts['port'] = None return generic_parts @@ -536,7 +543,8 @@ class SSLValidationHandler(urllib2.BaseHandler): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if https_proxy: proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy)) - s.connect((proxy_parts.get('hostname'), proxy_parts.get('port'))) + port = proxy_parts.get('port') or 443 + s.connect((proxy_parts.get('hostname'), port)) if proxy_parts.get('scheme') == 'http': s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port)) if 
proxy_parts.get('username'): From 33863eb653f3ed4d6f30ab816743443f473c5eae Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 07:38:51 -0800 Subject: [PATCH 292/590] Conditionally create the CustomHTTPSConnection class only if we have the required baseclasses. Fixes #11918 --- lib/ansible/module_utils/urls.py | 66 +++++++++++++++++--------------- 1 file changed, 35 insertions(+), 31 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index d0ee260e17..41613f6cb6 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -310,42 +310,45 @@ class NoSSLError(SSLValidationError): """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" pass +# Some environments (Google Compute Engine's CoreOS deploys) do not compile +# against openssl and thus do not have any HTTPS support. +CustomHTTPSConnection = CustomHTTPSHandler = None +if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib2, 'HTTPSHandler'): + class CustomHTTPSConnection(httplib.HTTPSConnection): + def __init__(self, *args, **kwargs): + httplib.HTTPSConnection.__init__(self, *args, **kwargs) + if HAS_SSLCONTEXT: + self.context = create_default_context() + if self.cert_file: + self.context.load_cert_chain(self.cert_file, self.key_file) -class CustomHTTPSConnection(httplib.HTTPSConnection): - def __init__(self, *args, **kwargs): - httplib.HTTPSConnection.__init__(self, *args, **kwargs) - if HAS_SSLCONTEXT: - self.context = create_default_context() - if self.cert_file: - self.context.load_cert_chain(self.cert_file, self.key_file) + def connect(self): + "Connect to a host on a given (SSL) port." - def connect(self): - "Connect to a host on a given (SSL) port." 
+ if hasattr(self, 'source_address'): + sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) + else: + sock = socket.create_connection((self.host, self.port), self.timeout) - if hasattr(self, 'source_address'): - sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) - else: - sock = socket.create_connection((self.host, self.port), self.timeout) + server_hostname = self.host + # Note: self._tunnel_host is not available on py < 2.6 but this code + # isn't used on py < 2.6 (lack of create_connection) + if self._tunnel_host: + self.sock = sock + self._tunnel() + server_hostname = self._tunnel_host - server_hostname = self.host - # Note: self._tunnel_host is not available on py < 2.6 but this code - # isn't used on py < 2.6 (lack of create_connection) - if self._tunnel_host: - self.sock = sock - self._tunnel() - server_hostname = self._tunnel_host + if HAS_SSLCONTEXT: + self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) + else: + self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) - if HAS_SSLCONTEXT: - self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) - else: - self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) + class CustomHTTPSHandler(urllib2.HTTPSHandler): -class CustomHTTPSHandler(urllib2.HTTPSHandler): + def https_open(self, req): + return self.do_open(CustomHTTPSConnection, req) - def https_open(self, req): - return self.do_open(CustomHTTPSConnection, req) - - https_request = urllib2.AbstractHTTPHandler.do_request_ + https_request = urllib2.AbstractHTTPHandler.do_request_ def generic_urlparse(parts): ''' @@ -673,8 +676,9 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, handlers.append(proxyhandler) # pre-2.6 versions of python cannot use the custom https - # handler, since the socket class is lacking this 
method - if hasattr(socket, 'create_connection'): + # handler, since the socket class is lacking create_connection. + # Some python builds lack HTTPS support. + if hasattr(socket, 'create_connection') and CustomHTTPSHandler: handlers.append(CustomHTTPSHandler) opener = urllib2.build_opener(*handlers) From 0095d04af9712c0c026b29e45dbe57a70e30f1e0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 08:02:46 -0800 Subject: [PATCH 293/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e6b7b17326..50e7bff554 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e6b7b17326b4c9d11501112270c52ae25955938a +Subproject commit 50e7bff554647ccd8a34729171420e72b3a00c61 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f3251de29c..bde5686552 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f3251de29cb10664b2c63a0021530c3fe34111a3 +Subproject commit bde5686552fdd88a758c7197b2eebe98b1afbf07 From 6a252a3f7727649c61c007e73f04201fd6fbdfa8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 11:21:19 -0500 Subject: [PATCH 294/590] Preserve the cumulative path for checking includes which have parents Otherwise, each relative include path is checked on its own, rather than in relation to the (possibly relative) path of its parent, meaning includes multiple level deep may fail to find the correct (or any) file. 
Fixes #13472 --- lib/ansible/playbook/included_file.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index b7c0fb8175..7fb851a12a 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -81,14 +81,19 @@ class IncludedFile: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = original_task._task_include + cumulative_path = None while parent_include is not None: parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params'))) + if cumulative_path is None: + cumulative_path = parent_include_dir + elif not os.path.isabs(cumulative_path): + cumulative_path = os.path.join(parent_include_dir, cumulative_path) include_target = templar.template(include_result['include']) if original_task._role: - new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir) + new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path) include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target) else: - include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target) + include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): break From 375eb501b3b1edf7fd91807374edfcd60ca736b8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 09:40:01 -0800 Subject: [PATCH 295/590] Update url to site that has an invalid certificate --- test/integration/roles/test_get_url/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 6e3842f6ab..09ee34277a 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ 
b/test/integration/roles/test_get_url/tasks/main.yml @@ -28,7 +28,7 @@ - name: test https fetch to a site with mismatched hostname and certificate get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result @@ -46,7 +46,7 @@ - name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result From 34e88e48a567d52e3ed0c3ecb6a5aa578e53dd19 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 16 Nov 2015 22:08:15 -0500 Subject: [PATCH 296/590] Add shared connection code for mysql modules --- lib/ansible/module_utils/mysql.py | 66 +++++++++++++++ .../utils/module_docs_fragments/mysql.py | 84 +++++++++++++++++++ .../tasks/user_password_update_test.yml | 1 - .../tasks/assert_fail_msg.yml | 2 - 4 files changed, 150 insertions(+), 3 deletions(-) create mode 100644 lib/ansible/module_utils/mysql.py create mode 100644 lib/ansible/utils/module_docs_fragments/mysql.py diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py new file mode 100644 index 0000000000..48e00adfd9 --- /dev/null +++ b/lib/ansible/module_utils/mysql.py @@ -0,0 +1,66 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Jonathan Mainguy , 2015 +# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None): + config = { + 'host': module.params['login_host'], + 'ssl': { + } + } + + if module.params['login_unix_socket']: + config['unix_socket'] = module.params['login_unix_socket'] + else: + config['port'] = module.params['login_port'] + + if os.path.exists(config_file): + config['read_default_file'] = config_file + + # If login_user or login_password are given, they should override the + # config file + if login_user is not None: + config['user'] = login_user + if login_password is not None: + config['passwd'] = login_password + if ssl_cert is not None: + config['ssl']['cert'] = ssl_cert + if ssl_key is not None: + config['ssl']['key'] = ssl_key + if ssl_ca is not None: + config['ssl']['ca'] = ssl_ca + if db is not None: + config['db'] = db + + db_connection = MySQLdb.connect(**config) + if cursor_class is not None: + return db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) + else: + return db_connection.cursor() diff --git a/lib/ansible/utils/module_docs_fragments/mysql.py b/lib/ansible/utils/module_docs_fragments/mysql.py new file mode 100644 index 0000000000..5dd1e04f93 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/mysql.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015 Jonathan Mainguy +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +class ModuleDocFragment(object): + + # Standard mysql documentation fragment + DOCUMENTATION = ''' +options: + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - Host running the database + required: false + default: localhost + login_port: + description: + - Port of the MySQL server. Requires login_host be defined as other then localhost if login_port is used + required: false + default: 3306 + login_unix_socket: + description: + - The path to a Unix domain socket for local connections + required: false + default: null + config_file: + description: + - Specify a config file from which user and password are to be read + required: false + default: '~/.my.cnf' + version_added: "2.0" + ssl_ca: + required: false + default: null + version_added: "2.0" + description: + - The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate as used by the server. + ssl_cert: + required: false + default: null + version_added: "2.0" + description: + - The path to a client public key certificate. + ssl_key: + required: false + default: null + version_added: "2.0" + description: + - The path to the client private key. +requirements: + - MySQLdb +notes: + - Requires the MySQLdb Python package on the remote host. For Ubuntu, this + is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this + is as easy as yum install MySQL-python. (See M(yum).) + - Both C(login_password) and C(login_user) are required when you are + passing credentials. If none are present, the module will attempt to read + the credentials from C(~/.my.cnf), and finally fall back to using the MySQL + default login of 'root' with no password. 
+''' diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index 50307cef95..904165c33e 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -63,7 +63,6 @@ assert: that: - "result.failed == true" - - "'check login credentials (login_user, and login_password' in result.msg" - name: create database using user2 and new password mysql_db: name={{ db_name }} state=present login_user={{ user_name_2 }} login_password={{ user_password_1 }} diff --git a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml index 70aa26856e..ba51b9d67c 100644 --- a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml +++ b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml @@ -23,5 +23,3 @@ assert: that: - "output.failed == true" - - "'{{msg}}' in output.msg" - From 851c0058b148ce041af5ca5c9fbdf25ff854cf8f Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Wed, 16 Dec 2015 12:45:05 -0600 Subject: [PATCH 297/590] Removing yaml support for path: --- docsite/rst/galaxy.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index c9dea27336..f4ca16cb8f 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -73,10 +73,6 @@ And here's an example showing some specific version downloads from multiple sour # from GitHub - src: https://github.com/bennojoy/nginx - # from GitHub installing to a relative path - - src: https://github.com/bennojoy/nginx - path: vagrant/roles/ - # from GitHub, overriding the name and specifying a specific tag - src: https://github.com/bennojoy/nginx version: master @@ -98,7 +94,6 @@ And here's an example showing some specific version downloads from multiple sour - 
src: git@gitlab.company.com:mygroup/ansible-base.git scm: git version: 0.1.0 - path: roles/ As you can see in the above, there are a large amount of controls available to customize where roles can be pulled from, and what to save roles as. From 6109f703970d741df6e2e28e750667f5d0083fda Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 13:56:55 -0500 Subject: [PATCH 298/590] Attempt at fixing strategy unit test failures on py2.6 and py3 --- test/units/plugins/strategies/test_strategy_base.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 7cc81a0324..53e243f926 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -377,9 +377,7 @@ class TestStrategyBase(unittest.TestCase): strategy_base._inventory = mock_inventory strategy_base._notified_handlers = {"test handler": [mock_host]} - mock_return_task = MagicMock(Handler) - mock_return_host = MagicMock(Host) - task_result = TaskResult(mock_return_host, mock_return_task, dict(changed=False)) + task_result = TaskResult(Host('host01'), Handler(), dict(changed=False)) tqm._final_q.put(('host_task_ok', task_result)) result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) From 9724117bbb6c09a4d6d2e1f6573e69db697bdcc7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 11:15:39 -0800 Subject: [PATCH 299/590] Update submodule refs for mysql refactor --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 50e7bff554..3c48320b29 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 50e7bff554647ccd8a34729171420e72b3a00c61 +Subproject commit 3c48320b295c3b4f99caccdc5f173b224109a393 
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index bde5686552..8ec4f95ffd 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit bde5686552fdd88a758c7197b2eebe98b1afbf07 +Subproject commit 8ec4f95ffd6d4e837cf0f3dd28649fb09afd0caf From baece499dfb6a8d8556db2b686d4f3c86d1d25b1 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 16 Dec 2015 11:47:12 -0800 Subject: [PATCH 300/590] fix plugin loading for Windows modules force plugin loader to only consider .py files, since that's the only place docs can live ATM... --- lib/ansible/cli/doc.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index a17164eb50..265b1c9a3f 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -90,7 +90,8 @@ class DocCLI(CLI): for module in self.args: try: - filename = module_loader.find_plugin(module) + # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs + filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue @@ -167,7 +168,8 @@ class DocCLI(CLI): if module in module_docs.BLACKLIST_MODULES: continue - filename = module_loader.find_plugin(module) + # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs + filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: continue From 491fd754f1cbe1944b0f45690842fd49b5977775 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 16:35:56 -0500 Subject: [PATCH 301/590] Updating the porting guide to note the complex args/bare vars change Related to #13518 --- docsite/rst/porting_guide_2.0.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docsite/rst/porting_guide_2.0.rst 
b/docsite/rst/porting_guide_2.0.rst index 8d69ecd440..543be052bd 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -55,6 +55,24 @@ uses key=value escaping which has not changed. The other option is to check for # Output "msg": "Testing some things" +* When specifying complex args as a variable, the variable must use the full jinja2 + variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. + In fact, even specifying args with variables has been deprecated, and will not be + allowed in future versions:: + + --- + - hosts: localhost + connection: local + gather_facts: false + vars: + my_dirs: + - { path: /tmp/3a, state: directory, mode: 0755 } + - { path: /tmp/3b, state: directory, mode: 0700 } + tasks: + - file: + args: "{{item}}" # <- args here uses the full variable syntax + with_items: my_dirs + * porting task includes * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 From 8716bf8021800a18cb8d6cfea3f296ba4f834692 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 16:32:06 -0500 Subject: [PATCH 302/590] All variables in complex args again Also updates the CHANGELOG to note the slight change, where bare variables in args are no longer allowed to be bare variables Fixes #13518 --- CHANGELOG.md | 20 ++++++++++++++++++++ lib/ansible/parsing/mod_args.py | 11 ++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6319634fb..005171ec9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,26 @@ newline being stripped you can change your playbook like this: "msg": "Testing some things" ``` +* When specifying complex args as a variable, the variable must use the full jinja2 +variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. 
+In fact, even specifying args with variables has been deprecated, and will not be +allowed in future versions: + + ``` + --- + - hosts: localhost + connection: local + gather_facts: false + vars: + my_dirs: + - { path: /tmp/3a, state: directory, mode: 0755 } + - { path: /tmp/3b, state: directory, mode: 0700 } + tasks: + - file: + args: "{{item}}" + with_items: my_dirs + ``` + ###Plugins * Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index abc35a415e..86b2d0d996 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -137,7 +137,16 @@ class ModuleArgsParser: # than those which may be parsed/normalized next final_args = dict() if additional_args: - final_args.update(additional_args) + if isinstance(additional_args, string_types): + templar = Templar(loader=None) + if templar._contains_vars(additional_args): + final_args['_variable_params'] = additional_args + else: + raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style ('{{var_name}}')") + elif isinstance(additional_args, dict): + final_args.update(additional_args) + else: + raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").') # how we normalize depends if we figured out what the module name is # yet. If we have already figured it out, it's an 'old style' invocation. From fffd29d1ab15dc93a2854f874695b63e15d5c198 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 14:06:11 -0800 Subject: [PATCH 303/590] Update mysql setup to handle installing mysql with dnf too. 
--- test/integration/roles/setup_mysql_db/tasks/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/integration/roles/setup_mysql_db/tasks/main.yml b/test/integration/roles/setup_mysql_db/tasks/main.yml index a8010e7138..612d94f6d1 100644 --- a/test/integration/roles/setup_mysql_db/tasks/main.yml +++ b/test/integration/roles/setup_mysql_db/tasks/main.yml @@ -31,6 +31,11 @@ with_items: mysql_packages when: ansible_pkg_mgr == 'yum' +- name: install mysqldb_test rpm dependencies + dnf: name={{ item }} state=latest + with_items: mysql_packages + when: ansible_pkg_mgr == 'dnf' + - name: install mysqldb_test debian dependencies apt: name={{ item }} state=latest with_items: mysql_packages From fd4ad2c8f24be48e2fa103a6b8feae287c4b57fe Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 14:08:08 -0800 Subject: [PATCH 304/590] Update submodule ref to fix a bug in mysql_user with mariadb --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 3c48320b29..16a3bdaa7d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 3c48320b295c3b4f99caccdc5f173b224109a393 +Subproject commit 16a3bdaa7da9e9f7c0572d3a3fdbfd79f29c2b9d From 857456ea5f159bbd333528aa6111b1510e1be78b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 18:21:47 -0500 Subject: [PATCH 305/590] Fixing template integration test for python 2.6 versions No longer immediately fallback to to_json if simplejson is not installed --- lib/ansible/plugins/filter/core.py | 4 +++- test/integration/roles/test_template/tasks/main.yml | 7 ------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index 3ab9db5a51..dc9acb4d09 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -100,9 +100,11 @@ def to_nice_json(a, *args, 
**kw): else: if major >= 2: return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw) + try: + return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw) + except: # Fallback to the to_json filter return to_json(a, *args, **kw) - return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw) def bool(a): ''' return a bool for the arg ''' diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 28477d44e5..9fd1d860e0 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -49,13 +49,6 @@ - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt -# Seems that python-2.6 now outputs the same format as everywhere else? -# when: pyver.stdout != '2.6' - -#- name: copy known good into place -# copy: src=foo-py26.txt dest={{output_dir}}/foo.txt -# when: pyver.stdout == '2.6' - - name: compare templated file to known good shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt register: diff_result From 15135f3c16a87f68bede61415f2571097eaa6268 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 19:12:05 -0500 Subject: [PATCH 306/590] Make sure we're using the original host when processing include results Also fixes a bug where we were passing an incorrect number of parameters to _do_handler_run() when processing an include file in a handler task/block. 
Fixes #13560 --- lib/ansible/playbook/included_file.py | 15 +++++++++++---- lib/ansible/plugins/strategy/__init__.py | 2 ++ lib/ansible/plugins/strategy/free.py | 10 ++++++++-- lib/ansible/plugins/strategy/linear.py | 10 ++++++++-- 4 files changed, 29 insertions(+), 8 deletions(-) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index 7fb851a12a..cc756a75a9 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -49,9 +49,15 @@ class IncludedFile: return "%s (%s): %s" % (self._filename, self._args, self._hosts) @staticmethod - def process_include_results(results, tqm, iterator, loader, variable_manager): + def process_include_results(results, tqm, iterator, inventory, loader, variable_manager): included_files = [] + def get_original_host(host): + if host.name in inventory._hosts_cache: + return inventory._hosts_cache[host.name] + else: + return inventory.get_host(host.name) + for res in results: if res._task.action == 'include': @@ -67,9 +73,10 @@ class IncludedFile: if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: continue - original_task = iterator.get_original_task(res._host, res._task) + original_host = get_original_host(res._host) + original_task = iterator.get_original_task(original_host, res._task) - task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task) + task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=original_host, task=original_task) templar = Templar(loader=loader, variables=task_vars) include_variables = include_result.get('include_variables', dict()) @@ -116,6 +123,6 @@ class IncludedFile: except ValueError: included_files.append(inc_file) - inc_file.add_host(res._host) + inc_file.add_host(original_host) return included_files diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 
d2d79d036b..7b2a3794ef 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -576,6 +576,7 @@ class StrategyBase: host_results, self._tqm, iterator=iterator, + inventory=self._inventory, loader=self._loader, variable_manager=self._variable_manager ) @@ -594,6 +595,7 @@ class StrategyBase: for task in block.block: result = self._do_handler_run( handler=task, + handler_name=None, iterator=iterator, play_context=play_context, notified_hosts=included_file._hosts[:], diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index 11eeaa9249..f4fc1226a1 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -139,8 +139,14 @@ class StrategyModule(StrategyBase): host_results.extend(results) try: - included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, - loader=self._loader, variable_manager=self._variable_manager) + included_files = IncludedFile.process_include_results( + host_results, + self._tqm, + iterator=iterator, + inventory=self._inventory, + loader=self._loader, + variable_manager=self._variable_manager + ) except AnsibleError as e: return False diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 8c94267cf4..7bb227dbae 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -261,8 +261,14 @@ class StrategyModule(StrategyBase): break try: - included_files = IncludedFile.process_include_results(host_results, self._tqm, - iterator=iterator, loader=self._loader, variable_manager=self._variable_manager) + included_files = IncludedFile.process_include_results( + host_results, + self._tqm, + iterator=iterator, + inventory=self._inventory, + loader=self._loader, + variable_manager=self._variable_manager + ) except AnsibleError as e: return False From e5c2c03dea0998872a6b16a18d6c187685a5fc7a Mon Sep 17 00:00:00 2001 From: 
James Cammarata Date: Tue, 15 Dec 2015 09:39:13 -0500 Subject: [PATCH 307/590] Enable host_key checking at the strategy level Implements a new method in the ssh connection plugin (fetch_and_store_key) which is used to prefetch the key using ssh-keyscan. --- lib/ansible/executor/task_executor.py | 17 +- lib/ansible/inventory/host.py | 11 +- lib/ansible/plugins/connection/__init__.py | 5 +- lib/ansible/plugins/connection/ssh.py | 193 +++++++++++++++++++-- lib/ansible/plugins/strategy/__init__.py | 30 +++- lib/ansible/utils/connection.py | 50 ++++++ 6 files changed, 273 insertions(+), 33 deletions(-) create mode 100644 lib/ansible/utils/connection.py diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 5d7430fad2..2623bc775b 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -32,6 +32,7 @@ from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVar from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.template import Templar +from ansible.utils.connection import get_smart_connection_type from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode @@ -564,21 +565,7 @@ class TaskExecutor: conn_type = self._play_context.connection if conn_type == 'smart': - conn_type = 'ssh' - if sys.platform.startswith('darwin') and self._play_context.password: - # due to a current bug in sshpass on OSX, which can trigger - # a kernel panic even for non-privileged users, we revert to - # paramiko on that OS when a SSH password is specified - conn_type = "paramiko" - else: - # see if SSH can support ControlPersist if not use paramiko - try: - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err or "Usage:" in 
err: - conn_type = "paramiko" - except OSError: - conn_type = "paramiko" + conn_type = get_smart_connection_type(self._play_context) connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin) if not connection: diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index 6263dcbc80..70f9f57b5f 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -57,6 +57,7 @@ class Host: name=self.name, vars=self.vars.copy(), address=self.address, + has_hostkey=self.has_hostkey, uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, @@ -65,10 +66,11 @@ class Host: def deserialize(self, data): self.__init__() - self.name = data.get('name') - self.vars = data.get('vars', dict()) - self.address = data.get('address', '') - self._uuid = data.get('uuid', uuid.uuid4()) + self.name = data.get('name') + self.vars = data.get('vars', dict()) + self.address = data.get('address', '') + self.has_hostkey = data.get('has_hostkey', False) + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -89,6 +91,7 @@ class Host: self._gathered_facts = False self._uuid = uuid.uuid4() + self.has_hostkey = False def __repr__(self): return self.get_name() diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 06616bac4c..7fc19c8c19 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -23,11 +23,11 @@ __metaclass__ = type import fcntl import gettext import os + from abc import ABCMeta, abstractmethod, abstractproperty - from functools import wraps -from ansible.compat.six import with_metaclass +from ansible.compat.six import with_metaclass from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins import shell_loader @@ -233,3 +233,4 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): f = 
self._play_context.connection_lockfd fcntl.lockf(f, fcntl.LOCK_UN) display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f)) + diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index a2abcf20ae..cce29824e1 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -19,7 +19,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.compat.six import text_type + +import base64 import fcntl +import hmac +import operator import os import pipes import pty @@ -28,9 +33,13 @@ import shlex import subprocess import time +from hashlib import md5, sha1, sha256 + from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.boolean import boolean +from ansible.utils.connection import get_smart_connection_type from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode @@ -41,7 +50,128 @@ except ImportError: display = Display() SSHPASS_AVAILABLE = None +HASHED_KEY_MAGIC = "|1|" +def split_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. 
+ """ + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + +def get_ssh_opts(play_context): + # FIXME: caching may help here + opts_dict = dict() + try: + cmd = ['ssh', '-G', play_context.remote_addr] + res = subprocess.check_output(cmd) + for line in res.split('\n'): + if ' ' in line: + (key, val) = line.split(' ', 1) + else: + key = line + val = '' + opts_dict[key.lower()] = val + + # next, we manually override any options that are being + # set via ssh_args or due to the fact that `ssh -G` doesn't + # actually use the options set via -o + for opt in ['ssh_args', 'ssh_common_args', 'ssh_extra_args']: + attr = getattr(play_context, opt, None) + if attr is not None: + args = split_args(attr) + for arg in args: + if '=' in arg: + (key, val) = arg.split('=', 1) + opts_dict[key.lower()] = val + + return opts_dict + except subprocess.CalledProcessError: + return dict() + +def host_in_known_hosts(host, ssh_opts): + # the setting from the ssh_opts may actually be multiple files, so + # we use shlex.split and simply take the first one specified + user_host_file = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) + + host_file_list = [] + host_file_list.append(user_host_file) + host_file_list.append("/etc/ssh/ssh_known_hosts") + host_file_list.append("/etc/ssh/ssh_known_hosts2") + + hfiles_not_found = 0 + for hf in host_file_list: + if not os.path.exists(hf): + continue + try: + host_fh = open(hf) + except (OSError, IOError) as e: + continue + else: + data = host_fh.read() + host_fh.close() + + for line in data.split("\n"): + line = line.strip() + if line is None or " " not in line: + continue + tokens = line.split() + if not tokens: + continue + if tokens[0].find(HASHED_KEY_MAGIC) == 0: + # this is a hashed known host entry + try: + (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2) + hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) + hash.update(host) + if 
hash.digest() == kn_host.decode('base64'): + return True + except: + # invalid hashed host key, skip it + continue + else: + # standard host file entry + if host in tokens[0]: + return True + + return False + +def fetch_ssh_host_key(play_context, ssh_opts): + keyscan_cmd = ['ssh-keyscan'] + + if play_context.port: + keyscan_cmd.extend(['-p', text_type(play_context.port)]) + + if boolean(ssh_opts.get('hashknownhosts', 'no')): + keyscan_cmd.append('-H') + + keyscan_cmd.append(play_context.remote_addr) + + p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + (stdout, stderr) = p.communicate() + if stdout == '': + raise AnsibleConnectionFailure("Failed to connect to the host to fetch the host key: %s." % stderr) + else: + return stdout + +def add_host_key(host_key, ssh_opts): + # the setting from the ssh_opts may actually be multiple files, so + # we use shlex.split and simply take the first one specified + user_known_hosts = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) + user_ssh_dir = os.path.dirname(user_known_hosts) + + if not os.path.exists(user_ssh_dir): + raise AnsibleError("the user ssh directory does not exist: %s" % user_ssh_dir) + elif not os.path.isdir(user_ssh_dir): + raise AnsibleError("%s is not a directory" % user_ssh_dir) + + try: + display.vv("adding to known_hosts file: %s" % user_known_hosts) + with open(user_known_hosts, 'a') as f: + f.write(host_key) + except (OSError, IOError) as e: + raise AnsibleError("error when trying to access the known hosts file: '%s', error was: %s" % (user_known_hosts, text_type(e))) class Connection(ConnectionBase): ''' ssh based connections ''' @@ -62,6 +192,56 @@ class Connection(ConnectionBase): def _connect(self): return self + @staticmethod + def fetch_and_store_key(host, play_context): + ssh_opts = get_ssh_opts(play_context) + if not host_in_known_hosts(play_context.remote_addr, ssh_opts): + display.debug("host %s does 
not have a known host key, fetching it" % host) + + # build the list of valid host key types, for use later as we scan for keys. + # we also use this to determine the most preferred key when multiple keys are available + valid_host_key_types = [x.lower() for x in ssh_opts.get('hostbasedkeytypes', '').split(',')] + + # attempt to fetch the key with ssh-keyscan. More than one key may be + # returned, so we save all and use the above list to determine which + host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') + host_keys = dict() + for host_key in host_key_data: + (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) + key_type = key_type.lower() + if key_type in valid_host_key_types and key_type not in host_keys: + host_keys[key_type.lower()] = host_key + + if len(host_keys) == 0: + raise AnsibleConnectionFailure("none of the available host keys found were in the HostBasedKeyTypes configuration option") + + # now we determine the preferred key by sorting the above dict on the + # index of the key type in the valid keys list + preferred_key = sorted(host_keys.items(), cmp=lambda x,y: cmp(valid_host_key_types.index(x), valid_host_key_types.index(y)), key=operator.itemgetter(0))[0] + + # shamelessly copied from here: + # https://github.com/ojarva/python-sshpubkeys/blob/master/sshpubkeys/__init__.py#L39 + # (which shamelessly copied it from somewhere else...) + (host_info, key_type, key_hash) = preferred_key[1].strip().split(' ', 3) + decoded_key = key_hash.decode('base64') + fp_plain = md5(decoded_key).hexdigest() + key_data = ':'.join(a+b for a, b in zip(fp_plain[::2], fp_plain[1::2])) + + # prompt the user to add the key + # if yes, add it, otherwise raise AnsibleConnectionFailure + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) + display.display("%s key fingerprint is SHA256:%s." 
% (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) + display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) + response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") + display.display("") + if boolean(response): + add_host_key(host_key, ssh_opts) + return True + else: + raise AnsibleConnectionFailure("Host key validation failed.") + + return False + @staticmethod def _sshpass_available(): global SSHPASS_AVAILABLE @@ -100,15 +280,6 @@ class Connection(ConnectionBase): return controlpersist, controlpath - @staticmethod - def _split_args(argstring): - """ - Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a - list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to - the argument list. The list will not contain any empty elements. - """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] - def _add_args(self, explanation, args): """ Adds the given args to self._command and displays a caller-supplied @@ -157,7 +328,7 @@ class Connection(ConnectionBase): # Next, we add [ssh_connection]ssh_args from ansible.cfg. 
if self._play_context.ssh_args: - args = self._split_args(self._play_context.ssh_args) + args = split_args(self._play_context.ssh_args) self._add_args("ansible.cfg set ssh_args", args) # Now we add various arguments controlled by configuration file settings @@ -210,7 +381,7 @@ class Connection(ConnectionBase): for opt in ['ssh_common_args', binary + '_extra_args']: attr = getattr(self._play_context, opt, None) if attr is not None: - args = self._split_args(attr) + args = split_args(attr) self._add_args("PlayContext set %s" % opt, args) # Check if ControlPersist is enabled and add a ControlPath if one hasn't diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 7b2a3794ef..e460708f90 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -29,7 +29,7 @@ import zlib from jinja2.exceptions import UndefinedError from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable +from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult @@ -39,6 +39,7 @@ from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar +from ansible.utils.connection import get_smart_connection_type from ansible.vars.unsafe_proxy import wrap_var try: @@ -139,6 +140,33 @@ class StrategyBase: display.debug("entering _queue_task() for %s/%s" % (host, task)) + if C.HOST_KEY_CHECKING and not host.has_hostkey: + # caveat here, regarding with loops. 
It is assumed that none of the connection + # related variables would contain '{{item}}' as it would cause some really + # weird loops. As is, if someone did something odd like that they would need + # to disable host key checking + templar = Templar(loader=self._loader, variables=task_vars) + temp_pc = play_context.set_task_and_variable_override(task=task, variables=task_vars, templar=templar) + temp_pc.post_validate(templar) + if temp_pc.connection in ('smart', 'ssh') and get_smart_connection_type(temp_pc) == 'ssh': + try: + # get the ssh connection plugin's class, and use its builtin + # static method to fetch and save the key to the known_hosts file + ssh_conn = connection_loader.get('ssh', class_only=True) + ssh_conn.fetch_and_store_key(host, temp_pc) + except AnsibleConnectionFailure as e: + # if that fails, add the host to the list of unreachable + # hosts and send the appropriate callback + self._tqm._unreachable_hosts[host.name] = True + self._tqm._stats.increment('dark', host.name) + tr = TaskResult(host=host, task=task, return_data=dict(msg=text_type(e))) + self._tqm.send_callback('v2_runner_on_unreachable', tr) + return + + # finally, we set the has_hostkey flag to true for this + # host so we can skip it quickly in the future + host.has_hostkey = True + task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) diff --git a/lib/ansible/utils/connection.py b/lib/ansible/utils/connection.py new file mode 100644 index 0000000000..6f6b405640 --- /dev/null +++ b/lib/ansible/utils/connection.py @@ -0,0 +1,50 @@ +# (c) 2015, Ansible, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import subprocess +import sys + + +__all__ = ['get_smart_connection_type'] + +def get_smart_connection_type(play_context): + ''' + Uses the ssh command with the ControlPersist option while checking + for an error to determine if we should use ssh or paramiko. Also + may take other factors into account. + ''' + + conn_type = 'ssh' + if sys.platform.startswith('darwin') and play_context.password: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified + conn_type = "paramiko" + else: + # see if SSH can support ControlPersist if not use paramiko + try: + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err or "Usage:" in err: + conn_type = "paramiko" + except OSError: + conn_type = "paramiko" + + return conn_type From d7f2f606e179cf0df4d308a0055b4ad62207b47c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 21:49:33 -0500 Subject: [PATCH 308/590] Add has_hostkey to mock objects to fix broken unit tests --- test/units/plugins/strategies/test_strategy_base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 53e243f926..8d1a1e8ada 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ 
-76,6 +76,7 @@ class TestStrategyBase(unittest.TestCase): for i in range(0, 5): mock_host = MagicMock() mock_host.name = "host%02d" % (i+1) + mock_host.has_hostkey = True mock_hosts.append(mock_host) mock_inventory = MagicMock() @@ -111,6 +112,7 @@ class TestStrategyBase(unittest.TestCase): fake_loader = DictDataLoader() mock_var_manager = MagicMock() mock_host = MagicMock() + mock_host.has_hostkey = True mock_inventory = MagicMock() mock_options = MagicMock() mock_options.module_path = None @@ -171,6 +173,7 @@ class TestStrategyBase(unittest.TestCase): mock_host = MagicMock() mock_host.name = 'test01' mock_host.vars = dict() + mock_host.has_hostkey = True mock_task = MagicMock() mock_task._role = None @@ -347,6 +350,7 @@ class TestStrategyBase(unittest.TestCase): mock_host = MagicMock(Host) mock_host.name = "test01" + mock_host.has_hostkey = True mock_inventory = MagicMock() mock_inventory.get_hosts.return_value = [mock_host] From d9c74536be63cedc3dd1711c73844827990e898d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 09:44:40 -0500 Subject: [PATCH 309/590] Fix handling of environment inheritence, and template each inherited env Environments were not being templated individually, so a variable environment value was causing the exception regarding dicts to be hit. Also, environments as inherited were coming through with the tasks listed first, followed by the parents, so they were being merged backwards. Reversing the list of environments fixed this. 
--- lib/ansible/plugins/action/__init__.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 254bab476b..e9b18651d6 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -151,14 +151,19 @@ class ActionBase(with_metaclass(ABCMeta, object)): if not isinstance(environments, list): environments = [ environments ] + # the environments as inherited need to be reversed, to make + # sure we merge in the parent's values first so those in the + # block then task 'win' in precedence + environments.reverse() for environment in environments: if environment is None: continue - if not isinstance(environment, dict): - raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment))) + temp_environment = self._templar.template(environment) + if not isinstance(temp_environment, dict): + raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment))) # very deliberately using update here instead of combine_vars, as # these environment settings should not need to merge sub-dicts - final_environment.update(environment) + final_environment.update(temp_environment) final_environment = self._templar.template(final_environment) return self._connection._shell.env_prefix(**final_environment) From dd3d04e96ab30bb0df89b5e3ab1ac9a9d91d5841 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 10:31:14 -0500 Subject: [PATCH 310/590] Adding pip install of virtualenv to test deps integration role --- .../roles/ansible_test_deps/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index f71128921d..5f75085d92 100644 --- 
a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -10,6 +10,9 @@ ignore_errors: true when: ansible_os_family == 'Debian' +- name: Install virtualenv + pip: name=virtualenv state=present + - name: Install RH epel yum: name="epel-release" state=installed sudo: true From 0b1ad8d4905fa83eddbc08e2a3dd395aa99b8aed Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 10:41:58 -0500 Subject: [PATCH 311/590] Switch virtualenv dep installation from pip to package manager --- .../roles/ansible_test_deps/tasks/main.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 5f75085d92..c9cb256a35 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -11,7 +11,12 @@ when: ansible_os_family == 'Debian' - name: Install virtualenv - pip: name=virtualenv state=present + yum: name=python-virtualenv state=installed + when: ansible_os_family == 'RedHat' + +- name: Install virtualenv + apt: name=python-virtualenv state=installed + when: ansible_os_family == 'Debian' - name: Install RH epel yum: name="epel-release" state=installed From cf3d503f790ddf7ba74bc768bd2faad7a550f5ee Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 11:00:54 -0500 Subject: [PATCH 312/590] Moving apt cache update to top to ensure cache is updated before deps installed --- .../roles/ansible_test_deps/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml 
b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index c9cb256a35..c2fc955a16 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -1,5 +1,8 @@ --- +- apt: update_cache=yes + when: ansible_os_family == 'Debian' + - name: Install sudo yum: name=sudo state=installed ignore_errors: true @@ -42,9 +45,6 @@ - libselinux-python when: ansible_os_family == 'RedHat' -- apt: update_cache=yes - when: ansible_os_family == 'Debian' - - name: Install Debian ansible dependencies apt: name="{{ item }}" state=installed update_cache=yes sudo: true From 26bbabcfba637e17b36bb20d064c390cf0461e4d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 11:15:06 -0500 Subject: [PATCH 313/590] Consolidating package lines for virtualenv install in test deps integration --- .../roles/ansible_test_deps/tasks/main.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index c2fc955a16..ac133730ec 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -13,14 +13,6 @@ ignore_errors: true when: ansible_os_family == 'Debian' -- name: Install virtualenv - yum: name=python-virtualenv state=installed - when: ansible_os_family == 'RedHat' - -- name: Install virtualenv - apt: name=python-virtualenv state=installed - when: ansible_os_family == 'Debian' - - name: Install RH epel yum: name="epel-release" state=installed sudo: true @@ -43,6 +35,7 @@ - gcc - python-devel - libselinux-python + - python-virtualenv when: ansible_os_family == 'RedHat' - name: Install Debian ansible 
dependencies @@ -57,6 +50,7 @@ - git - unzip - python-dev + - python-virtualenv when: ansible_os_family == 'Debian' - name: Install ubuntu 12.04 ansible dependencies From 21c127c5813c800204c729d84188f1e6d7bae3e7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 12:06:17 -0500 Subject: [PATCH 314/590] Fixing bugs in ssh known_host fetching * If remote_addr is not set in the PlayContext, use the host.address field instead (which is how the action plugin works) Fixes #13581 --- lib/ansible/plugins/connection/ssh.py | 29 +++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index cce29824e1..c24d166734 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -60,11 +60,15 @@ def split_args(argstring): """ return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] -def get_ssh_opts(play_context): +def get_ssh_opts(host, play_context): # FIXME: caching may help here opts_dict = dict() try: - cmd = ['ssh', '-G', play_context.remote_addr] + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + cmd = ['ssh', '-G', remote_addr] res = subprocess.check_output(cmd) for line in res.split('\n'): if ' ' in line: @@ -137,7 +141,7 @@ def host_in_known_hosts(host, ssh_opts): return False -def fetch_ssh_host_key(play_context, ssh_opts): +def fetch_ssh_host_key(host, play_context, ssh_opts): keyscan_cmd = ['ssh-keyscan'] if play_context.port: @@ -146,7 +150,11 @@ def fetch_ssh_host_key(play_context, ssh_opts): if boolean(ssh_opts.get('hashknownhosts', 'no')): keyscan_cmd.append('-H') - keyscan_cmd.append(play_context.remote_addr) + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + keyscan_cmd.append(remote_addr) p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, 
close_fds=True) (stdout, stderr) = p.communicate() @@ -194,8 +202,13 @@ class Connection(ConnectionBase): @staticmethod def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(play_context) - if not host_in_known_hosts(play_context.remote_addr, ssh_opts): + ssh_opts = get_ssh_opts(host, play_context) + + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + if not host_in_known_hosts(remote_addr, ssh_opts): display.debug("host %s does not have a known host key, fetching it" % host) # build the list of valid host key types, for use later as we scan for keys. @@ -204,7 +217,7 @@ class Connection(ConnectionBase): # attempt to fetch the key with ssh-keyscan. More than one key may be # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') + host_key_data = fetch_ssh_host_key(host, play_context, ssh_opts).strip().split('\n') host_keys = dict() for host_key in host_key_data: (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) @@ -229,7 +242,7 @@ class Connection(ConnectionBase): # prompt the user to add the key # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, remote_addr)) display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) response = display.prompt("Are you sure you want to continue connecting (yes/no)? 
") From 8db4415e2e95e5993822b4f75e700dd14a928ad9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 17 Dec 2015 12:25:29 -0500 Subject: [PATCH 315/590] changed test to use filter for accurate reporting --- test/integration/roles/test_service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index c0e590643c..8b61d62143 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -98,7 +98,7 @@ - name: assert that the broken test failed assert: that: - - "broken_enable_result.failed == True" + - "broken_enable_result|failed" - name: remove the test daemon script file: path=/usr/sbin/ansible_test_service state=absent From 586208234cc921acc70fbe1fff211707ceba0c7a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 12:42:53 -0500 Subject: [PATCH 316/590] Revert "Fixing bugs in ssh known_host fetching" This reverts commit 21c127c5813c800204c729d84188f1e6d7bae3e7. 
--- lib/ansible/plugins/connection/ssh.py | 29 ++++++++------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index c24d166734..cce29824e1 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -60,15 +60,11 @@ def split_args(argstring): """ return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] -def get_ssh_opts(host, play_context): +def get_ssh_opts(play_context): # FIXME: caching may help here opts_dict = dict() try: - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - cmd = ['ssh', '-G', remote_addr] + cmd = ['ssh', '-G', play_context.remote_addr] res = subprocess.check_output(cmd) for line in res.split('\n'): if ' ' in line: @@ -141,7 +137,7 @@ def host_in_known_hosts(host, ssh_opts): return False -def fetch_ssh_host_key(host, play_context, ssh_opts): +def fetch_ssh_host_key(play_context, ssh_opts): keyscan_cmd = ['ssh-keyscan'] if play_context.port: @@ -150,11 +146,7 @@ def fetch_ssh_host_key(host, play_context, ssh_opts): if boolean(ssh_opts.get('hashknownhosts', 'no')): keyscan_cmd.append('-H') - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - keyscan_cmd.append(remote_addr) + keyscan_cmd.append(play_context.remote_addr) p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) (stdout, stderr) = p.communicate() @@ -202,13 +194,8 @@ class Connection(ConnectionBase): @staticmethod def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(host, play_context) - - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - if not host_in_known_hosts(remote_addr, ssh_opts): + ssh_opts = get_ssh_opts(play_context) + if not host_in_known_hosts(play_context.remote_addr, ssh_opts): display.debug("host %s does not have 
a known host key, fetching it" % host) # build the list of valid host key types, for use later as we scan for keys. @@ -217,7 +204,7 @@ class Connection(ConnectionBase): # attempt to fetch the key with ssh-keyscan. More than one key may be # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(host, play_context, ssh_opts).strip().split('\n') + host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') host_keys = dict() for host_key in host_key_data: (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) @@ -242,7 +229,7 @@ class Connection(ConnectionBase): # prompt the user to add the key # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, remote_addr)) + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") From e5462194261c7b55ccdf41adc4525dc86a1a34c1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 12:43:36 -0500 Subject: [PATCH 317/590] Revert "Enable host_key checking at the strategy level" This reverts commit 1a6d660d7e285cceec474952a33af4d8dffd0a8d. 
--- lib/ansible/executor/task_executor.py | 17 +- lib/ansible/inventory/host.py | 11 +- lib/ansible/plugins/connection/__init__.py | 5 +- lib/ansible/plugins/connection/ssh.py | 193 ++------------------- lib/ansible/plugins/strategy/__init__.py | 30 +--- lib/ansible/utils/connection.py | 50 ------ 6 files changed, 33 insertions(+), 273 deletions(-) delete mode 100644 lib/ansible/utils/connection.py diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 2623bc775b..5d7430fad2 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -32,7 +32,6 @@ from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVar from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.template import Templar -from ansible.utils.connection import get_smart_connection_type from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode @@ -565,7 +564,21 @@ class TaskExecutor: conn_type = self._play_context.connection if conn_type == 'smart': - conn_type = get_smart_connection_type(self._play_context) + conn_type = 'ssh' + if sys.platform.startswith('darwin') and self._play_context.password: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified + conn_type = "paramiko" + else: + # see if SSH can support ControlPersist if not use paramiko + try: + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err or "Usage:" in err: + conn_type = "paramiko" + except OSError: + conn_type = "paramiko" connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin) if not 
connection: diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index 70f9f57b5f..6263dcbc80 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -57,7 +57,6 @@ class Host: name=self.name, vars=self.vars.copy(), address=self.address, - has_hostkey=self.has_hostkey, uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, @@ -66,11 +65,10 @@ class Host: def deserialize(self, data): self.__init__() - self.name = data.get('name') - self.vars = data.get('vars', dict()) - self.address = data.get('address', '') - self.has_hostkey = data.get('has_hostkey', False) - self._uuid = data.get('uuid', uuid.uuid4()) + self.name = data.get('name') + self.vars = data.get('vars', dict()) + self.address = data.get('address', '') + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -91,7 +89,6 @@ class Host: self._gathered_facts = False self._uuid = uuid.uuid4() - self.has_hostkey = False def __repr__(self): return self.get_name() diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 7fc19c8c19..06616bac4c 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -23,11 +23,11 @@ __metaclass__ = type import fcntl import gettext import os - from abc import ABCMeta, abstractmethod, abstractproperty -from functools import wraps +from functools import wraps from ansible.compat.six import with_metaclass + from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins import shell_loader @@ -233,4 +233,3 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): f = self._play_context.connection_lockfd fcntl.lockf(f, fcntl.LOCK_UN) display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f)) - diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index cce29824e1..a2abcf20ae 100644 --- 
a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -19,12 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.compat.six import text_type - -import base64 import fcntl -import hmac -import operator import os import pipes import pty @@ -33,13 +28,9 @@ import shlex import subprocess import time -from hashlib import md5, sha1, sha256 - from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase -from ansible.utils.boolean import boolean -from ansible.utils.connection import get_smart_connection_type from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode @@ -50,128 +41,7 @@ except ImportError: display = Display() SSHPASS_AVAILABLE = None -HASHED_KEY_MAGIC = "|1|" -def split_args(argstring): - """ - Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a - list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to - the argument list. The list will not contain any empty elements. 
- """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] - -def get_ssh_opts(play_context): - # FIXME: caching may help here - opts_dict = dict() - try: - cmd = ['ssh', '-G', play_context.remote_addr] - res = subprocess.check_output(cmd) - for line in res.split('\n'): - if ' ' in line: - (key, val) = line.split(' ', 1) - else: - key = line - val = '' - opts_dict[key.lower()] = val - - # next, we manually override any options that are being - # set via ssh_args or due to the fact that `ssh -G` doesn't - # actually use the options set via -o - for opt in ['ssh_args', 'ssh_common_args', 'ssh_extra_args']: - attr = getattr(play_context, opt, None) - if attr is not None: - args = split_args(attr) - for arg in args: - if '=' in arg: - (key, val) = arg.split('=', 1) - opts_dict[key.lower()] = val - - return opts_dict - except subprocess.CalledProcessError: - return dict() - -def host_in_known_hosts(host, ssh_opts): - # the setting from the ssh_opts may actually be multiple files, so - # we use shlex.split and simply take the first one specified - user_host_file = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) - - host_file_list = [] - host_file_list.append(user_host_file) - host_file_list.append("/etc/ssh/ssh_known_hosts") - host_file_list.append("/etc/ssh/ssh_known_hosts2") - - hfiles_not_found = 0 - for hf in host_file_list: - if not os.path.exists(hf): - continue - try: - host_fh = open(hf) - except (OSError, IOError) as e: - continue - else: - data = host_fh.read() - host_fh.close() - - for line in data.split("\n"): - line = line.strip() - if line is None or " " not in line: - continue - tokens = line.split() - if not tokens: - continue - if tokens[0].find(HASHED_KEY_MAGIC) == 0: - # this is a hashed known host entry - try: - (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2) - hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) - hash.update(host) - if 
hash.digest() == kn_host.decode('base64'): - return True - except: - # invalid hashed host key, skip it - continue - else: - # standard host file entry - if host in tokens[0]: - return True - - return False - -def fetch_ssh_host_key(play_context, ssh_opts): - keyscan_cmd = ['ssh-keyscan'] - - if play_context.port: - keyscan_cmd.extend(['-p', text_type(play_context.port)]) - - if boolean(ssh_opts.get('hashknownhosts', 'no')): - keyscan_cmd.append('-H') - - keyscan_cmd.append(play_context.remote_addr) - - p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) - (stdout, stderr) = p.communicate() - if stdout == '': - raise AnsibleConnectionFailure("Failed to connect to the host to fetch the host key: %s." % stderr) - else: - return stdout - -def add_host_key(host_key, ssh_opts): - # the setting from the ssh_opts may actually be multiple files, so - # we use shlex.split and simply take the first one specified - user_known_hosts = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) - user_ssh_dir = os.path.dirname(user_known_hosts) - - if not os.path.exists(user_ssh_dir): - raise AnsibleError("the user ssh directory does not exist: %s" % user_ssh_dir) - elif not os.path.isdir(user_ssh_dir): - raise AnsibleError("%s is not a directory" % user_ssh_dir) - - try: - display.vv("adding to known_hosts file: %s" % user_known_hosts) - with open(user_known_hosts, 'a') as f: - f.write(host_key) - except (OSError, IOError) as e: - raise AnsibleError("error when trying to access the known hosts file: '%s', error was: %s" % (user_known_hosts, text_type(e))) class Connection(ConnectionBase): ''' ssh based connections ''' @@ -192,56 +62,6 @@ class Connection(ConnectionBase): def _connect(self): return self - @staticmethod - def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(play_context) - if not host_in_known_hosts(play_context.remote_addr, ssh_opts): - display.debug("host %s does 
not have a known host key, fetching it" % host) - - # build the list of valid host key types, for use later as we scan for keys. - # we also use this to determine the most preferred key when multiple keys are available - valid_host_key_types = [x.lower() for x in ssh_opts.get('hostbasedkeytypes', '').split(',')] - - # attempt to fetch the key with ssh-keyscan. More than one key may be - # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') - host_keys = dict() - for host_key in host_key_data: - (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) - key_type = key_type.lower() - if key_type in valid_host_key_types and key_type not in host_keys: - host_keys[key_type.lower()] = host_key - - if len(host_keys) == 0: - raise AnsibleConnectionFailure("none of the available host keys found were in the HostBasedKeyTypes configuration option") - - # now we determine the preferred key by sorting the above dict on the - # index of the key type in the valid keys list - preferred_key = sorted(host_keys.items(), cmp=lambda x,y: cmp(valid_host_key_types.index(x), valid_host_key_types.index(y)), key=operator.itemgetter(0))[0] - - # shamelessly copied from here: - # https://github.com/ojarva/python-sshpubkeys/blob/master/sshpubkeys/__init__.py#L39 - # (which shamelessly copied it from somewhere else...) - (host_info, key_type, key_hash) = preferred_key[1].strip().split(' ', 3) - decoded_key = key_hash.decode('base64') - fp_plain = md5(decoded_key).hexdigest() - key_data = ':'.join(a+b for a, b in zip(fp_plain[::2], fp_plain[1::2])) - - # prompt the user to add the key - # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) - display.display("%s key fingerprint is SHA256:%s." 
% (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) - display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) - response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") - display.display("") - if boolean(response): - add_host_key(host_key, ssh_opts) - return True - else: - raise AnsibleConnectionFailure("Host key validation failed.") - - return False - @staticmethod def _sshpass_available(): global SSHPASS_AVAILABLE @@ -280,6 +100,15 @@ class Connection(ConnectionBase): return controlpersist, controlpath + @staticmethod + def _split_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. + """ + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + def _add_args(self, explanation, args): """ Adds the given args to self._command and displays a caller-supplied @@ -328,7 +157,7 @@ class Connection(ConnectionBase): # Next, we add [ssh_connection]ssh_args from ansible.cfg. 
if self._play_context.ssh_args: - args = split_args(self._play_context.ssh_args) + args = self._split_args(self._play_context.ssh_args) self._add_args("ansible.cfg set ssh_args", args) # Now we add various arguments controlled by configuration file settings @@ -381,7 +210,7 @@ class Connection(ConnectionBase): for opt in ['ssh_common_args', binary + '_extra_args']: attr = getattr(self._play_context, opt, None) if attr is not None: - args = split_args(attr) + args = self._split_args(attr) self._add_args("PlayContext set %s" % opt, args) # Check if ControlPersist is enabled and add a ControlPath if one hasn't diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index e460708f90..7b2a3794ef 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -29,7 +29,7 @@ import zlib from jinja2.exceptions import UndefinedError from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure +from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult @@ -39,7 +39,6 @@ from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar -from ansible.utils.connection import get_smart_connection_type from ansible.vars.unsafe_proxy import wrap_var try: @@ -140,33 +139,6 @@ class StrategyBase: display.debug("entering _queue_task() for %s/%s" % (host, task)) - if C.HOST_KEY_CHECKING and not host.has_hostkey: - # caveat here, regarding with loops. 
It is assumed that none of the connection - # related variables would contain '{{item}}' as it would cause some really - # weird loops. As is, if someone did something odd like that they would need - # to disable host key checking - templar = Templar(loader=self._loader, variables=task_vars) - temp_pc = play_context.set_task_and_variable_override(task=task, variables=task_vars, templar=templar) - temp_pc.post_validate(templar) - if temp_pc.connection in ('smart', 'ssh') and get_smart_connection_type(temp_pc) == 'ssh': - try: - # get the ssh connection plugin's class, and use its builtin - # static method to fetch and save the key to the known_hosts file - ssh_conn = connection_loader.get('ssh', class_only=True) - ssh_conn.fetch_and_store_key(host, temp_pc) - except AnsibleConnectionFailure as e: - # if that fails, add the host to the list of unreachable - # hosts and send the appropriate callback - self._tqm._unreachable_hosts[host.name] = True - self._tqm._stats.increment('dark', host.name) - tr = TaskResult(host=host, task=task, return_data=dict(msg=text_type(e))) - self._tqm.send_callback('v2_runner_on_unreachable', tr) - return - - # finally, we set the has_hostkey flag to true for this - # host so we can skip it quickly in the future - host.has_hostkey = True - task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) diff --git a/lib/ansible/utils/connection.py b/lib/ansible/utils/connection.py deleted file mode 100644 index 6f6b405640..0000000000 --- a/lib/ansible/utils/connection.py +++ /dev/null @@ -1,50 +0,0 @@ -# (c) 2015, Ansible, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import subprocess -import sys - - -__all__ = ['get_smart_connection_type'] - -def get_smart_connection_type(play_context): - ''' - Uses the ssh command with the ControlPersist option while checking - for an error to determine if we should use ssh or paramiko. Also - may take other factors into account. - ''' - - conn_type = 'ssh' - if sys.platform.startswith('darwin') and play_context.password: - # due to a current bug in sshpass on OSX, which can trigger - # a kernel panic even for non-privileged users, we revert to - # paramiko on that OS when a SSH password is specified - conn_type = "paramiko" - else: - # see if SSH can support ControlPersist if not use paramiko - try: - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err or "Usage:" in err: - conn_type = "paramiko" - except OSError: - conn_type = "paramiko" - - return conn_type From 1b5e7ce0253c896f5166b5ffd1c2614090cc75a1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 10:23:02 -0800 Subject: [PATCH 318/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 16a3bdaa7d..c75c000369 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 16a3bdaa7da9e9f7c0572d3a3fdbfd79f29c2b9d +Subproject commit 
c75c0003697d00f52cedb68d4c1b05b7e95991e0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8ec4f95ffd..06bdec0cac 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8ec4f95ffd6d4e837cf0f3dd28649fb09afd0caf +Subproject commit 06bdec0cac86ef2339e0b4d8a4616ee24619956f From ce1febe28bb538c9d6db59449caf4da9dcf23f7e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 11:25:45 -0800 Subject: [PATCH 319/590] debug line needs var not msg --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 09ee34277a..640c987790 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -78,7 +78,7 @@ # If distros start backporting SNI, can make a new conditional based on whether this works: # python -c 'from ssl import SSLContext' -- debug: msg=get_url_result +- debug: var=get_url_result - name: Assert that SNI works with this python version assert: that: From bad1c173b87a7b68fc0ae79b35376fc31e8cc5d7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 11:36:36 -0800 Subject: [PATCH 320/590] Update core submodule for mysql_db fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c75c000369..b4a3fdd493 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c75c0003697d00f52cedb68d4c1b05b7e95991e0 +Subproject commit b4a3fdd493378853c0b6ab35d5d8bcf52612a4a0 From 8c6f56f982fce50d5b030928e425740a30d4f86c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 11:46:26 -0800 Subject: [PATCH 321/590] kennetreitz.org times out but www.kennethreitz.org is fine --- 
test/integration/roles/test_lookups/tasks/main.yml | 6 +++--- test/integration/roles/test_uri/tasks/main.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 5ca29e27c1..3c5e066ee3 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -177,7 +177,7 @@ - name: Test that retrieving a url with invalid cert fails set_fact: - web_data: "{{ lookup('url', 'https://kennethreitz.org/') }}" + web_data: "{{ lookup('url', 'https://www.kennethreitz.org/') }}" ignore_errors: True register: url_invalid_cert @@ -188,9 +188,9 @@ - name: Test that retrieving a url with invalid cert with validate_certs=False works set_fact: - web_data: "{{ lookup('url', 'https://kennethreitz.org/', validate_certs=False) }}" + web_data: "{{ lookup('url', 'https://www.kennethreitz.org/', validate_certs=False) }}" register: url_no_validate_cert - assert: that: - - "'kennethreitz.org' in web_data" + - "'www.kennethreitz.org' in web_data" diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 7300578982..18229e6b7c 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -94,7 +94,7 @@ - name: test https fetch to a site with mismatched hostname and certificate uri: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result @@ -117,7 +117,7 @@ - name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result From 5929ffc7c3b79b830edeebdb8542b53c3c0a15b3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 
17 Dec 2015 16:01:56 -0500 Subject: [PATCH 322/590] Make --list-tasks respect tags Also makes the output closer to the appearance of v1 Fixes #13260 --- lib/ansible/cli/playbook.py | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index a9c0ed018d..e51d5d3993 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -30,6 +30,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.inventory import Inventory from ansible.parsing.dataloader import DataLoader +from ansible.playbook.play_context import PlayContext from ansible.utils.vars import load_extra_vars from ansible.vars import VariableManager @@ -152,18 +153,10 @@ class PlaybookCLI(CLI): for p in results: display.display('\nplaybook: %s' % p['playbook']) - i = 1 - for play in p['plays']: - if play.name: - playname = play.name - else: - playname = '#' + str(i) - - msg = "\n PLAY: %s" % (playname) - mytags = set() - if self.options.listtags and play.tags: - mytags = mytags.union(set(play.tags)) - msg += ' TAGS: [%s]' % (','.join(mytags)) + for idx, play in enumerate(p['plays']): + msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) + mytags = set(play.tags) + msg += ' TAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) @@ -176,20 +169,21 @@ class PlaybookCLI(CLI): if self.options.listtags or self.options.listtasks: taskmsg = ' tasks:' + all_vars = variable_manager.get_vars(loader=loader, play=play) + play_context = PlayContext(play=play, options=self.options) for block in play.compile(): + block = block.filter_tagged_tasks(play_context, all_vars) if not block.has_tasks(): continue - j = 1 for task in block.block: - taskmsg += "\n %s" % task - if self.options.listtags and task.tags: - taskmsg += " TAGS: [%s]" % 
','.join(mytags.union(set(task.tags))) - j = j + 1 + if task.action == 'meta': + continue + taskmsg += "\n %s" % task.get_name() + taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) display.display(taskmsg) - i = i + 1 return 0 else: return results From d4ffc96c8039e5a79baf23be173d03c2e4c8565f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 16:30:23 -0500 Subject: [PATCH 323/590] Further tweaks to the output format of list tasks/tags --- lib/ansible/cli/playbook.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index e51d5d3993..d307abdfcc 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -156,7 +156,7 @@ class PlaybookCLI(CLI): for idx, play in enumerate(p['plays']): msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) mytags = set(play.tags) - msg += ' TAGS: [%s]' % (','.join(mytags)) + msg += '\tTAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) @@ -166,8 +166,11 @@ class PlaybookCLI(CLI): display.display(msg) + all_tags = set() if self.options.listtags or self.options.listtasks: - taskmsg = ' tasks:' + taskmsg = '' + if self.options.listtasks: + taskmsg = ' tasks:\n' all_vars = variable_manager.get_vars(loader=loader, play=play) play_context = PlayContext(play=play, options=self.options) @@ -179,8 +182,18 @@ class PlaybookCLI(CLI): for task in block.block: if task.action == 'meta': continue - taskmsg += "\n %s" % task.get_name() - taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) + + all_tags.update(task.tags) + if self.options.listtasks: + cur_tags = list(mytags.union(set(task.tags))) + cur_tags.sort() + taskmsg += " %s" % task.action + taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) + + if self.options.listtags: + cur_tags = list(mytags.union(all_tags)) + cur_tags.sort() + taskmsg += " TASK TAGS: 
[%s]\n" % ', '.join(cur_tags) display.display(taskmsg) From 4ba7158282f148c90c72f824d6ebcd1a9953b580 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 16:33:23 -0500 Subject: [PATCH 324/590] Fixing a mistake from tweaking list stuff too much Use the action only if the task name is not set --- lib/ansible/cli/playbook.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index d307abdfcc..dfd06b1920 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -187,7 +187,10 @@ class PlaybookCLI(CLI): if self.options.listtasks: cur_tags = list(mytags.union(set(task.tags))) cur_tags.sort() - taskmsg += " %s" % task.action + if task.name: + taskmsg += " %s" % task.get_name() + else: + taskmsg += " %s" % task.action taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) if self.options.listtags: From 3057fc1753eff42fb073ae866734cb9127cbd25a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 13:46:15 -0800 Subject: [PATCH 325/590] Update submodule ref for mysql_user fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b4a3fdd493..9366dfb63e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b4a3fdd493378853c0b6ab35d5d8bcf52612a4a0 +Subproject commit 9366dfb63e565c9e0901d714be8832fc89b275d6 From c5eda277ac6ca50cf593a724a368ad973d1a3935 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 17:51:42 -0800 Subject: [PATCH 326/590] Fix get_url tests in light of distros backporting SNI support --- .../roles/test_get_url/tasks/main.yml | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 640c987790..d7885f0905 100644 --- 
a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -16,6 +16,21 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +- name: Determine if python looks like it will support modern ssl features like SNI + command: python -c 'from ssl import SSLContext' + ignore_errors: True + register: python_test + +- name: Set python_has_sslcontext if we have it + set_fact: + python_has_ssl_context: True + when: python_test.rc == 0 + +- name: Set python_has_sslcontext False if we don't have it + set_fact: + python_has_ssl_context: False + when: python_test.rc != 0 + - name: test https fetch get_url: url="https://raw.githubusercontent.com/ansible/ansible/devel/README.md" dest={{output_dir}}/get_url.txt force=yes register: result @@ -74,7 +89,7 @@ - command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result - when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + when: "{{ python_has_ssl_context }}" # If distros start backporting SNI, can make a new conditional based on whether this works: # python -c 'from ssl import SSLContext' @@ -84,11 +99,11 @@ that: - 'data_result.rc == 0' - '"failed" not in get_url_result' - when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + when: "{{ python_has_ssl_context }}" # If the client doesn't support SNI then get_url should have failed with a certificate mismatch - name: Assert that hostname verification failed because SNI is not supported on this version of python assert: that: - 'get_url_result["failed"]' - when: "{{ ansible_python_version | version_compare('2.7.9', '<') }}" + when: "{{ not python_has_ssl_context }}" From 12c0bb9414224517c6b15ec1d58aedd45d40703d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 20:52:49 -0500 Subject: [PATCH 327/590] Use --source instead of -e for awk in integration Makefile --- 
test/integration/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index a2d91f96f1..dcd30f0b83 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -193,5 +193,5 @@ test_lookup_paths: no_log: # This test expects 7 loggable vars and 0 non loggable ones, if either mismatches it fails, run the ansible-playbook command to debug - [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk -e 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] + [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk --source 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] From 1f3eec293bad4add2e52fbc52a7bbdcc912c3ab8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 20:06:53 -0800 Subject: [PATCH 328/590] Install an updated version of pycrypto on Ubuntu12 from pip --- .../roles/ansible_test_deps/tasks/main.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index ac133730ec..0b9e58c659 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -67,6 +67,14 @@ - rubygems-integration when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "14.04" +# Not sure why CentOS 6 is working without this.... 
+#- name: Install Red Hat 6 ansible dependencies +# yum: name="{{ item }}" state=installed +# sudo: true +# with_items: +# - python-crypto2.6 +# when: ansible_distribution in ('CentOS', 'RedHat') and ansible_distribution_major_version == "6" + - name: Install ansible pip deps sudo: true pip: name="{{ item }}" @@ -75,6 +83,13 @@ - Jinja2 - paramiko +- name: Install ubuntu 12.04 ansible pip deps + sudo: true + pip: name="{{ item }}" + with_items: + - pycrypto + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" + - name: Remove tty sudo requirement sudo: true lineinfile: "dest=/etc/sudoers regexp='^Defaults[ , ]*requiretty' line='#Defaults requiretty'" From 3143b352c53e2beeecec996d4ca80fa7a4293f93 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 23:07:28 -0500 Subject: [PATCH 329/590] Add ca-certificates update to the integration deps playbook --- .../roles/ansible_test_deps/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 0b9e58c659..85fad6a7fb 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -53,6 +53,10 @@ - python-virtualenv when: ansible_os_family == 'Debian' +- name: update ca certificates + yum: name=ca-certificates state=latest + when: ansible_os_family == 'RedHat' + - name: Install ubuntu 12.04 ansible dependencies apt: name="{{ item }}" state=installed update_cache=yes sudo: true From a391d6f89ab906d585e623f58789b39fb0797faf Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 20:09:48 -0800 Subject: [PATCH 330/590] Add state=latest to pip install of pycrypto --- .../roles/ansible_test_deps/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 85fad6a7fb..897a4e54ed 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -89,7 +89,7 @@ - name: Install ubuntu 12.04 ansible pip deps sudo: true - pip: name="{{ item }}" + pip: name="{{ item }}" state=latest with_items: - pycrypto when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" From 44e30e49dd4b678ff21d308d0e8b00b769de75e1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 07:47:23 -0500 Subject: [PATCH 331/590] Add awk to integration test deps list --- .../roles/ansible_test_deps/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 897a4e54ed..25b19d040e 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -33,6 +33,7 @@ - openssl - make - gcc + - gawk - python-devel - libselinux-python - python-virtualenv @@ -49,6 +50,7 @@ - mercurial - git - unzip + - gawk - python-dev - python-virtualenv when: ansible_os_family == 'Debian' From 1debc2da44e05282fea216e4b6e14e83d50bb4ea Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 10:34:27 -0500 Subject: [PATCH 332/590] Do a full yum update to make sure packages are latest version For the deps setup of integration tests, as we sometimes see odd errors we can't reproduce, which may be related to slightly out of date package dependencies. 
--- .../roles/ansible_test_deps/tasks/main.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 25b19d040e..17198cdc41 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -55,8 +55,12 @@ - python-virtualenv when: ansible_os_family == 'Debian' -- name: update ca certificates - yum: name=ca-certificates state=latest +#- name: update ca certificates +# yum: name=ca-certificates state=latest +# when: ansible_os_family == 'RedHat' + +- name: update all rpm packages + yum: name=* state=latest when: ansible_os_family == 'RedHat' - name: Install ubuntu 12.04 ansible dependencies From a3dcb910b8b8ad1c1ff65c31102cccd68ed31bf9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 10:58:55 -0500 Subject: [PATCH 333/590] Fixing bugs with {changed,failed}_when and until with registered vars * Saving of the registered variable was occuring after the tests for changed/failed_when. * Each of the above fields and until were being post_validated too early, so variables which were not defined at that time were causing task failures. 
Fixes #13591 --- lib/ansible/executor/task_executor.py | 11 +++++------ lib/ansible/playbook/task.py | 21 +++++++++++++++++++++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 5d7430fad2..b0a5157a52 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -387,7 +387,6 @@ class TaskExecutor: # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions - #vars_copy = variables.copy() vars_copy = variables.copy() display.debug("starting attempt loop") @@ -404,6 +403,11 @@ class TaskExecutor: return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") + # update the local copy of vars with the registered value, if specified, + # or any facts which may have been generated by the module execution + if self._task.register: + vars_copy[self._task.register] = result + if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout # response, so we parse it here and replace the result @@ -433,11 +437,6 @@ class TaskExecutor: return failed_when_result return False - # update the local copy of vars with the registered value, if specified, - # or any facts which may have been generated by the module execution - if self._task.register: - vars_copy[self._task.register] = result - if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 17f1952e39..825ee50269 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -260,6 +260,27 @@ class Task(Base, Conditional, Taggable, Become): break return templar.template(value, convert_bare=True) + def _post_validate_changed_when(self, attr, value, templar): + ''' + changed_when is evaluated after the execution of the task is complete, + and should not be 
templated during the regular post_validate step. + ''' + return value + + def _post_validate_failed_when(self, attr, value, templar): + ''' + failed_when is evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. + ''' + return value + + def _post_validate_until(self, attr, value, templar): + ''' + until is evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. + ''' + return value + def get_vars(self): all_vars = dict() if self._block: From f2364ecf5f9abcb11112dc7fe7c7eaffb6703bd1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 08:10:57 -0800 Subject: [PATCH 334/590] Add a Fedora latest host into the mix --- test/utils/ansible-playbook_integration_runner/main.yml | 7 ++++++- .../roles/ansible_test_deps/tasks/main.yml | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 5d15541490..9bcda9c71e 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -22,7 +22,12 @@ image: "ami-96a818fe" ssh_user: "centos" platform: "centos-7-x86_64" - + - distribution: "Fedora" + version: "23" + image: "ami-518bfb3b" + ssh_user: "fedora" + platform: "fedora-23-x86_64" + tasks: - debug: var=ansible_version - include: ec2.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 17198cdc41..16bdde79a0 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -16,10 +16,10 @@ - name: Install RH epel yum: name="epel-release" state=installed sudo: 
true - when: ansible_os_family == 'RedHat' + when: ansible_distribution in ('CentOS', 'RedHat') - name: Install RH ansible dependencies - yum: name="{{ item }}" state=installed + package: name="{{ item }}" state=installed sudo: true with_items: - python-pip From 0c154e81f055e07c78acedc8ac310a8011ff8274 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 11:30:14 -0500 Subject: [PATCH 335/590] Make integration tests run in parallel with async --- .../roles/run_integration/tasks/main.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 2114567d15..980d4a4d32 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -10,11 +10,21 @@ register: results - shell: ". 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + async: 3600 + poll: 0 + register: async_test_results sudo: true environment: TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" args: chdir: "{{ results.stdout }}/ansible" + +- name: poll for test results + async_status: + jid: "{{async_test_results.ansible_job_id}}" register: test_results + until: test_results.finished + retries: 360 + wait: 10 ignore_errors: true From 73a0153b8e3e26ac095e140f6ffa6f8a1d756ff6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 12:44:57 -0500 Subject: [PATCH 336/590] Fix typo in integration test runner role --- .../roles/run_integration/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 980d4a4d32..3eba828544 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -26,5 +26,5 @@ register: test_results until: test_results.finished retries: 360 - wait: 10 + delay: 10 ignore_errors: true From 5d798c2725475b045fb06b46cba08c39bfcfeda8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 18 Dec 2015 12:14:03 -0500 Subject: [PATCH 337/590] added missing features to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 005171ec9a..0a5e7e2b7c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -371,6 +371,8 @@ allowed in future versions: explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. 
* environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. * ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. +* random cows are more random +* when: now gets the registered var after the first iteration, making it possible to break out of item loops * Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour: ``` From 5dbd7c18a1011e5bc922731574815c22a80d5bc6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 18 Dec 2015 13:57:58 -0500 Subject: [PATCH 338/590] added note about add_hosts --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a5e7e2b7c..17180993a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -370,6 +370,7 @@ allowed in future versions: * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. * environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. +* add_hosts is much stricter about host name and will prevent invalid names from being added. * ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. * random cows are more random * when: now gets the registered var after the first iteration, making it possible to break out of item loops From 1cc83dd0d968c264c3da4982aa2a658d2e4aeb51 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 11:50:06 -0800 Subject: [PATCH 339/590] Make tests that use kennethreitz retry. 
--- test/integration/roles/test_get_url/tasks/main.yml | 9 +++++++++ test/integration/roles/test_uri/tasks/main.yml | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index d7885f0905..cbf3b345f1 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -47,6 +47,12 @@ dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result + # kennethreitz having trouble staying up. Eventually need to install our own + # certs & web server to test this... also need to install and test it with + # a proxy so the complications are inevitable + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/shouldnotexist.html" @@ -65,6 +71,9 @@ dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/kreitz.html" diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 18229e6b7c..9ce05938b6 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -98,6 +98,12 @@ dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result + # kennethreitz having trouble staying up. Eventually need to install our own + # certs & web server to test this... 
also need to install and test it with + # a proxy so the complications are inevitable + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/shouldnotexist.html" @@ -121,6 +127,9 @@ dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/kreitz.html" From 02f65eaa805f39a15e35a813bcd6a1fdc24ade8c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 14:59:05 -0500 Subject: [PATCH 340/590] Make integration runner ec2 add_hosts use valid host names --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 59e15f0da1..d4740d9570 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -30,7 +30,7 @@ - name: Add hosts group temporary inventory group with pem path add_host: - name: "{{ item.1.platform }} {{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" groups: dynamic_hosts ansible_ssh_host: "{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" ansible_ssh_private_key_file: '{{ pem_path }}' From 0823a2c16f923bd950399dd879b5440356cb8411 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 15:33:44 -0500 Subject: [PATCH 341/590] Removing update all for test deps, it didn't fix the problem --- .../roles/ansible_test_deps/tasks/main.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 
16bdde79a0..234eb70f92 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -55,12 +55,8 @@ - python-virtualenv when: ansible_os_family == 'Debian' -#- name: update ca certificates -# yum: name=ca-certificates state=latest -# when: ansible_os_family == 'RedHat' - -- name: update all rpm packages - yum: name=* state=latest +- name: update ca certificates + yum: name=ca-certificates state=latest when: ansible_os_family == 'RedHat' - name: Install ubuntu 12.04 ansible dependencies From 68fe3d856f3a58d4cf84053a803bb5e286d61773 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 14:04:51 -0800 Subject: [PATCH 342/590] Fedora 23 needs to have python2 packages installed --- test/utils/ansible-playbook_integration_runner/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 9bcda9c71e..8683ffd544 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -33,6 +33,15 @@ - include: ec2.yml when: groups['dynamic_hosts'] is not defined +# Have to hardcode these per-slave. We can't even run setup yet so we can't +# introspect what they have. 
+- hosts: dynamic_hosts + sudo: true + tasks: + - name: Install packages that let setup and package manager modules run + raw: dnf install -y python2 python2-dnf libselinux-python + when: "{{ inventory_hostname }} == 'fedora-23-x86_64'" + - hosts: dynamic_hosts sudo: true vars: From ec60bfbb3f0b88d37b91a2deae2bf6b79a1091dc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 14:36:17 -0800 Subject: [PATCH 343/590] Ubuntu images with hvm ssd --- test/utils/ansible-playbook_integration_runner/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 8683ffd544..b8942172bc 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -4,12 +4,12 @@ slaves: - distribution: "Ubuntu" version: "12.04" - image: "ami-2ccc7a44" + image: "ami-309ddf5a" ssh_user: "ubuntu" platform: "ubuntu-12.04-x86_64" - distribution: "Ubuntu" version: "14.04" - image: "ami-9a562df2" + image: "ami-d06632ba" ssh_user: "ubuntu" platform: "ubuntu-14.04-x86_64" - distribution: "CentOS" From 26e5bcdb39517e8247e59ac038db7dd641cbb7fa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 14:38:54 -0800 Subject: [PATCH 344/590] Bugfix the fedora 23 install task --- test/utils/ansible-playbook_integration_runner/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index b8942172bc..e82e0dea3f 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -37,10 +37,11 @@ # introspect what they have. 
- hosts: dynamic_hosts sudo: true + gather_facts: False tasks: - name: Install packages that let setup and package manager modules run raw: dnf install -y python2 python2-dnf libselinux-python - when: "{{ inventory_hostname }} == 'fedora-23-x86_64'" + when: "'{{ inventory_hostname }}' == 'fedora-23-x86_64'" - hosts: dynamic_hosts sudo: true From 78dde62710bd63f931bce21cf4352994a5a36873 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 15:14:38 -0800 Subject: [PATCH 345/590] What is going on here --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index d4740d9570..c6971486ec 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -28,6 +28,8 @@ - name: Wait a little longer for centos pause: seconds=20 +- debug: var=ec2.results + - name: Add hosts group temporary inventory group with pem path add_host: name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" From f7ed33378e234542950b992499e848a8284cc2fa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 15:42:41 -0800 Subject: [PATCH 346/590] Fix the fedora host detection --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 -- test/utils/ansible-playbook_integration_runner/main.yml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index c6971486ec..d4740d9570 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -28,8 +28,6 @@ - name: Wait a little longer for centos pause: seconds=20 -- debug: var=ec2.results - - name: Add hosts group temporary inventory group with pem path add_host: name: "{{ 
item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index e82e0dea3f..4aa17d11c1 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -41,7 +41,7 @@ tasks: - name: Install packages that let setup and package manager modules run raw: dnf install -y python2 python2-dnf libselinux-python - when: "'{{ inventory_hostname }}' == 'fedora-23-x86_64'" + when: "'fedora-23' in '{{ inventory_hostname }}'" - hosts: dynamic_hosts sudo: true From 3197eeaaa8d49c862fcb98165bcb254c74e10f4e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 22:16:49 -0800 Subject: [PATCH 347/590] update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9366dfb63e..15c1c0cca7 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9366dfb63e565c9e0901d714be8832fc89b275d6 +Subproject commit 15c1c0cca79196d4dde630db2a7eee90367051cc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 06bdec0cac..c6829752d8 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 06bdec0cac86ef2339e0b4d8a4616ee24619956f +Subproject commit c6829752d852398c255704cd5d7faa54342e143e From 07a00593066cb439f0b9aea4e815259cc8a2ec75 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 22:23:25 -0800 Subject: [PATCH 348/590] update submodule ref for doc fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 15c1c0cca7..fcb3397df7 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 
@@ -Subproject commit 15c1c0cca79196d4dde630db2a7eee90367051cc +Subproject commit fcb3397df7944ff15ea698b5717c06e8fc7d43ba From d2ad17e88f5f1bc2ed7282ec4322aaffd869834a Mon Sep 17 00:00:00 2001 From: Matt Clay Date: Sat, 19 Dec 2015 00:08:49 -0800 Subject: [PATCH 349/590] Fixed import typo for memcache module in tests. The typo caused the test for the memcached cache plugin to be skipped even when the necessary memcache python module was installed. --- test/units/plugins/cache/test_cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py index 0547ba55bf..cd82e1ef2c 100644 --- a/test/units/plugins/cache/test_cache.py +++ b/test/units/plugins/cache/test_cache.py @@ -26,7 +26,7 @@ from ansible.plugins.cache.memory import CacheModule as MemoryCache HAVE_MEMCACHED = True try: - import memcached + import memcache except ImportError: HAVE_MEMCACHED = False else: From 6127a8585e8eaea159ed5fd91c3ddb61b2d25dc8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 19 Dec 2015 11:45:59 -0500 Subject: [PATCH 350/590] removed invocation info as it is not no_log aware This was added in 1.9 and 2.0 tried to copy, but since it cannot obey no_log restrictions I commented it out. I did not remove as it is still very useful for module invocation debugging. --- lib/ansible/plugins/action/__init__.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e9b18651d6..c363a47ec3 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -82,13 +82,14 @@ class ActionBase(with_metaclass(ABCMeta, object)): * Module parameters. 
These are stored in self._task.args """ - # store the module invocation details into the results results = {} - if self._task.async == 0: - results['invocation'] = dict( - module_name = self._task.action, - module_args = self._task.args, - ) + # This does not respect no_log set by module args, left here for debugging module invocation + #if self._task.async == 0: + # # store the module invocation details into the results + # results['invocation'] = dict( + # module_name = self._task.action, + # module_args = self._task.args, + # ) return results def _configure_module(self, module_name, module_args, task_vars=None): From c63ae9948543a3f73ae17dc4eecae7b22fb62947 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 10:10:38 -0800 Subject: [PATCH 351/590] Make sure that yum is present on redhat family systems (makes things also work on fedora systems where dnf is the default) --- .../roles/ansible_test_deps/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 234eb70f92..89f7382a1e 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -37,6 +37,8 @@ - python-devel - libselinux-python - python-virtualenv + - yum + - yum-metadata-parser when: ansible_os_family == 'RedHat' - name: Install Debian ansible dependencies From 2936682f004d9d3fc349e31113607636e971b71b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:09:20 -0800 Subject: [PATCH 352/590] Revert "removed invocation info as it is not no_log aware" This reverts commit 6127a8585e8eaea159ed5fd91c3ddb61b2d25dc8. 
--- lib/ansible/plugins/action/__init__.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index c363a47ec3..e9b18651d6 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -82,14 +82,13 @@ class ActionBase(with_metaclass(ABCMeta, object)): * Module parameters. These are stored in self._task.args """ + # store the module invocation details into the results results = {} - # This does not respect no_log set by module args, left here for debugging module invocation - #if self._task.async == 0: - # # store the module invocation details into the results - # results['invocation'] = dict( - # module_name = self._task.action, - # module_args = self._task.args, - # ) + if self._task.async == 0: + results['invocation'] = dict( + module_name = self._task.action, + module_args = self._task.args, + ) return results def _configure_module(self, module_name, module_args, task_vars=None): From d32a885e98f9154f5c74afba482b4299a2e2be5e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:24:59 -0800 Subject: [PATCH 353/590] Make return invocation information so that our sanitized copy will take precedence over what the executor knows. 
--- lib/ansible/module_utils/basic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 62b8cadfd6..4870ed096d 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1431,7 +1431,6 @@ class AnsibleModule(object): self.log(msg, log_args=log_args) - def _set_cwd(self): try: cwd = os.getcwd() @@ -1524,6 +1523,8 @@ class AnsibleModule(object): self.add_path_info(kwargs) if not 'changed' in kwargs: kwargs['changed'] = False + if 'invocation' not in kwargs: + kwargs['invocation'] = self.params kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) @@ -1534,6 +1535,8 @@ class AnsibleModule(object): self.add_path_info(kwargs) assert 'msg' in kwargs, "implementation error -- msg to explain the error is required" kwargs['failed'] = True + if 'invocation' not in kwargs: + kwargs['invocation'] = self.params kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) From 51cca87d67823f4edfc4e05bf3e5a4070e494113 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:27:16 -0800 Subject: [PATCH 354/590] Also need redhat-rpm-config to compile pycrypto --- .../roles/ansible_test_deps/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 89f7382a1e..de08126b82 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -39,6 +39,7 @@ - python-virtualenv - yum - yum-metadata-parser + - redhat-rpm-config when: ansible_os_family == 'RedHat' - name: Install Debian ansible dependencies From 8ffc1fa838d7e984f4a99568021660cbbd243550 
Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:31:46 -0800 Subject: [PATCH 355/590] Comment to explain why we strip _ansible_notify specially --- lib/ansible/plugins/action/normal.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index bf93fdad2d..f9b55e1ff5 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -28,11 +28,13 @@ class ActionModule(ActionBase): results = super(ActionModule, self).run(tmp, task_vars) results.update(self._execute_module(tmp=tmp, task_vars=task_vars)) - # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. - for field in ('ansible_notify',): + # + # We don't want modules to determine that running the module fires + # notify handlers. That's for the playbook to decide. 
+ for field in ('_ansible_notify',): if field in results: results.pop(field) From 224d5963361deb33107e5f38fd28a4d5197f931e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:51:16 -0800 Subject: [PATCH 356/590] Remove args from get_name() as we can't tell if any of the args are no_log --- lib/ansible/playbook/task.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 825ee50269..fb75786474 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -107,11 +107,10 @@ class Task(Base, Conditional, Taggable, Become): elif self.name: return self.name else: - flattened_args = self._merge_kv(self.args) if self._role: - return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args) + return "%s : %s" % (self._role.get_name(), self.action) else: - return "%s %s" % (self.action, flattened_args) + return "%s" % (self.action,) def _merge_kv(self, ds): if ds is None: From 9abef1a1d7e8df5e580e17ef4a54cec280fbc7dc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 12:39:48 -0800 Subject: [PATCH 357/590] Troubleshooting has reduced us to this --- test/integration/roles/test_get_url/tasks/main.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index cbf3b345f1..54debc06d1 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -96,12 +96,22 @@ register: get_url_result ignore_errors: True +- name: TROUBLESHOOTING + shell: curl https://foo.sni.velox.ch/ > /var/tmp/velox.html + register: trouble + ignore_errors: True + when: "{{ python_has_ssl_context }}" + +- debug: var=trouble + when: "{{ python_has_ssl_context }}" + +- debug: var=get_url_result + when: "{{ python_has_ssl_context }}" + - command: "grep 'sent the following 
TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" -# If distros start backporting SNI, can make a new conditional based on whether this works: -# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: From e66c070e5c0d50f0a90fcd3b73044a6faeef7c81 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 13:00:58 -0800 Subject: [PATCH 358/590] Add package module to squash list --- lib/ansible/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 7f74358dd5..5df9602246 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -201,7 +201,7 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa # the module takes both, bad things could happen. # In the future we should probably generalize this even further # (mapping of param: squash field) -DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True) +DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True) # paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True) DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True) From bb2935549f38a83670baadb74041ef98902e0640 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 19 Dec 2015 16:14:56 -0500 Subject: [PATCH 359/590] corrected service detection in docker versions now if 1 == bash it falls back into tool detection --- lib/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 94a5a11f72..796ebc92bd 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -555,8 +555,8 @@ class Facts(object): if proc_1 is None: rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True) - if proc_1 in ['init', '/sbin/init']: - # many systems return init, so this cannot be trusted + if proc_1 in ['init', '/sbin/init', 'bash']: + # many systems return init, so this cannot be trusted, bash is from docker proc_1 = None # if not init/None it should be an identifiable or custom init, so we are done! From e2d9f4e2f272c6010b0c00257aa695c1606e05ab Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 15:49:06 -0800 Subject: [PATCH 360/590] Fix unittests for return of invocation from fail_json and exit_json --- test/units/module_utils/basic/test_exit_json.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py index 66610ec3ed..931447f8ab 100644 --- a/test/units/module_utils/basic/test_exit_json.py +++ b/test/units/module_utils/basic/test_exit_json.py @@ -56,7 +56,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=False)) + self.assertEquals(return_val, dict(changed=False, invocation={})) def test_exit_json_args_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -67,7 +67,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", changed=False)) + self.assertEquals(return_val, dict(msg="message", changed=False, invocation={})) def 
test_fail_json_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -78,13 +78,13 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 1) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", failed=True)) + self.assertEquals(return_val, dict(msg="message", failed=True, invocation={})) def test_exit_json_proper_changed(self): with self.assertRaises(SystemExit) as ctx: self.module.exit_json(changed=True, msg='success') return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=True, msg='success')) + self.assertEquals(return_val, dict(changed=True, msg='success', invocation={})) @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): @@ -94,19 +94,22 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), (dict(username='person', password='password12345'), dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), (dict(username='person', password='$ecret k3y'), dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/', not_secret='following the leader', msg='here'), 
dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), ) From 3ec0104128103c4c37c117b5ef4548733245bcf4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 19 Dec 2015 12:49:06 -0500 Subject: [PATCH 361/590] Fixing bugs in conditional testing with until and some integration runner tweaks --- lib/ansible/executor/task_executor.py | 8 ++--- lib/ansible/playbook/conditional.py | 34 +++++++++---------- lib/ansible/playbook/task.py | 2 +- .../main.yml | 2 +- .../roles/ansible_test_deps/tasks/main.yml | 1 + .../roles/run_integration/tasks/main.yml | 17 +++++----- 6 files changed, 33 insertions(+), 31 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index b0a5157a52..c8b6fa179b 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -35,7 +35,7 @@ from ansible.template import Templar from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode -from ansible.vars.unsafe_proxy import UnsafeProxy +from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var try: from __main__ import display @@ -406,7 +406,7 @@ class TaskExecutor: # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: - vars_copy[self._task.register] = result + vars_copy[self._task.register] = wrap_var(result.copy()) if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout @@ -453,7 +453,7 @@ class TaskExecutor: if attempt < retries - 1: cond = Conditional(loader=self._loader) - cond.when = self._task.until + cond.when = [ self._task.until ] if 
cond.evaluate_conditional(templar, vars_copy): break @@ -466,7 +466,7 @@ class TaskExecutor: # do the final update of the local variables here, for both registered # values and any facts which may have been created if self._task.register: - variables[self._task.register] = result + variables[self._task.register] = wrap_var(result) if 'ansible_facts' in result: variables.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index fc178e2fa1..c8c6a9359e 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -22,7 +22,7 @@ __metaclass__ = type from jinja2.exceptions import UndefinedError from ansible.compat.six import text_type -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleUndefinedVariable from ansible.playbook.attribute import FieldAttribute from ansible.template import Templar @@ -89,16 +89,22 @@ class Conditional: # make sure the templar is using the variables specifed to this method templar.set_available_variables(variables=all_vars) - conditional = templar.template(conditional) - if not isinstance(conditional, basestring) or conditional == "": - return conditional + try: + conditional = templar.template(conditional) + if not isinstance(conditional, text_type) or conditional == "": + return conditional - # a Jinja2 evaluation that results in something Python can eval! - presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional - conditional = templar.template(presented, fail_on_undefined=False) - - val = conditional.strip() - if val == presented: + # a Jinja2 evaluation that results in something Python can eval! 
+ presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional + conditional = templar.template(presented) + val = conditional.strip() + if val == "True": + return True + elif val == "False": + return False + else: + raise AnsibleError("unable to evaluate conditional: %s" % original) + except (AnsibleUndefinedVariable, UndefinedError) as e: # the templating failed, meaning most likely a # variable was undefined. If we happened to be # looking for an undefined variable, return True, @@ -108,11 +114,5 @@ class Conditional: elif "is defined" in original: return False else: - raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented)) - elif val == "True": - return True - elif val == "False": - return False - else: - raise AnsibleError("unable to evaluate conditional: %s" % original) + raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e)) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index fb75786474..62b8cbc999 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -82,7 +82,7 @@ class Task(Base, Conditional, Taggable, Become): _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', default=3) - _until = FieldAttribute(isa='list') + _until = FieldAttribute(isa='string') def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 4aa17d11c1..27c4ae51b0 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -74,4 +74,4 @@ - name: Fail shell: 'echo "{{ inventory_hostname }}, Failed" && exit 1' - when: "test_results.rc != 0" + when: "'rc' not in test_results or test_results.rc != 0" diff --git 
a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index de08126b82..d9611497e9 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -59,6 +59,7 @@ when: ansible_os_family == 'Debian' - name: update ca certificates + sudo: true yum: name=ca-certificates state=latest when: ansible_os_family == 'RedHat' diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 3eba828544..2d01999dbf 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -6,10 +6,12 @@ - name: Get ansible source dir sudo: false - shell: "cd ~ && pwd" + shell: "cd ~/ansible && pwd" register: results -- shell: ". hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" +- shell: "ls -la && . 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + args: + chdir: "{{ results.stdout }}" async: 3600 poll: 0 register: async_test_results @@ -17,14 +19,13 @@ environment: TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" - args: - chdir: "{{ results.stdout }}/ansible" - name: poll for test results - async_status: - jid: "{{async_test_results.ansible_job_id}}" + async_status: jid="{{async_test_results.ansible_job_id}}" register: test_results until: test_results.finished - retries: 360 - delay: 10 + retries: 120 + delay: 30 ignore_errors: true + +- debug: var=test_results From 3da312da9c1a92d5e8f47f3274338e4ef476b5a6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 23:11:25 -0800 Subject: [PATCH 362/590] Switch from yum to package when installing sudo so that dnf is handled as well --- .../roles/ansible_test_deps/tasks/main.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index d9611497e9..832138527f 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -4,14 +4,8 @@ when: ansible_os_family == 'Debian' - name: Install sudo - yum: name=sudo state=installed + package: name=sudo state=installed ignore_errors: true - when: ansible_os_family == 'RedHat' - -- name: Install sudo - apt: name=sudo state=installed - ignore_errors: true - when: ansible_os_family == 'Debian' - name: Install RH epel yum: name="epel-release" state=installed From 6ec58bbd5f86bd4f2ca8aa6e7af78ee8ef28ee98 Mon Sep 17 00:00:00 2001 From: Branko Majic Date: Sun, 20 Dec 2015 
14:19:20 +0100 Subject: [PATCH 363/590] Adding documentation for the 'dig' lookup (#13126). --- docsite/rst/playbooks_lookups.rst | 106 ++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index 25560e284d..3c2222c337 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -240,6 +240,112 @@ If you're not using 2.0 yet, you can do something similar with the credstash too debug: msg="Poor man's credstash lookup! {{ lookup('pipe', 'credstash -r us-west-1 get my-other-password') }}" +.. _dns_lookup: + +The DNS Lookup (dig) +```````````````````` +.. versionadded:: 1.9.0 + +.. warning:: This lookup depends on the `dnspython `_ + library. + +The ``dig`` lookup runs queries against DNS servers to retrieve DNS records for +a specific name (*FQDN* - fully qualified domain name). It is possible to lookup any DNS record in this manner. + +There is a couple of different syntaxes that can be used to specify what record +should be retrieved, and for which name. It is also possible to explicitly +specify the DNS server(s) to use for lookups. + +In its simplest form, the ``dig`` lookup plugin can be used to retrieve an IPv4 +address (DNS ``A`` record) associated with *FQDN*: + +.. note:: If you need to obtain the ``AAAA`` record (IPv6 address), you must + specify the record type explicitly. Syntax for specifying the record + type is described below. + +.. note:: The trailing dot in most of the examples listed is purely optional, + but is specified for completeness/correctness sake. + +:: + + - debug: msg="The IPv4 address for example.com. is {{ lookup('dig', 'example.com.')}}" + +In addition to (default) ``A`` record, it is also possible to specify a different +record type that should be queried. 
This can be done by either passing-in +additional parameter of format ``qtype=TYPE`` to the ``dig`` lookup, or by +appending ``/TYPE`` to the *FQDN* being queried. For example:: + + - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com.', 'qtype=TXT') }}" + - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com./TXT') }}" + +If multiple values are associated with the requested record, the results will be +returned as a comma-separated list. In such cases you may want to pass option +``wantlist=True`` to the plugin, which will result in the record values being +returned as a list over which you can iterate later on:: + + - debug: msg="One of the MX records for gmail.com. is {{ item }}" + with_items: "{{ lookup('dig', 'gmail.com./MX', wantlist=True) }}" + +In case of reverse DNS lookups (``PTR`` records), you can also use a convenience +syntax of format ``IP_ADDRESS/PTR``. The following three lines would produce the +same output:: + + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8/PTR') }}" + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa./PTR') }}" + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa.', 'qtype=PTR') }}" + +By default, the lookup will rely on system-wide configured DNS servers for +performing the query. It is also possible to explicitly specify DNS servers to +query using the ``@DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N`` notation. This +needs to be passed-in as an additional parameter to the lookup. For example:: + + - debug: msg="Querying 8.8.8.8 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@8.8.8.8') }}" + +In some cases the DNS records may hold a more complex data structure, or it may +be useful to obtain the results in a form of a dictionary for future +processing. The ``dig`` lookup supports parsing of a number of such records, +with the result being returned as a dictionary. 
This way it is possible to +easily access such nested data. This return format can be requested by +passing-in the ``flat=0`` option to the lookup. For example:: + + - debug: msg="XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}" + with_items: "{{ lookup('dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" + +Take note that due to the way Ansible lookups work, you must pass the +``wantlist=True`` argument to the lookup, otherwise Ansible will report errors. + +Currently the dictionary results are supported for the following records: + +.. note:: *ALL* is not a record per-se, merely the listed fields are available + for any record results you retrieve in the form of a dictionary. + +========== ============================================================================= +Record Fields +---------- ----------------------------------------------------------------------------- +*ALL* owner, ttl, type +A address +AAAA address +CNAME target +DNAME target +DLV algorithm, digest_type, key_tag, digest +DNSKEY flags, algorithm, protocol, key +DS algorithm, digest_type, key_tag, digest +HINFO cpu, os +LOC latitude, longitude, altitude, size, horizontal_precision, vertical_precision +MX preference, exchange +NAPTR order, preference, flags, service, regexp, replacement +NS target +NSEC3PARAM algorithm, flags, iterations, salt +PTR target +RP mbox, txt +SOA mname, rname, serial, refresh, retry, expire, minimum +SPF strings +SRV priority, weight, port, target +SSHFP algorithm, fp_type, fingerprint +TLSA usage, selector, mtype, cert +TXT strings +========== ============================================================================= + .. 
_more_lookups: More Lookups From b90506341ac77c4885efe754ae401b90b0f61a7f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 08:06:26 -0800 Subject: [PATCH 364/590] Fixes for tests that assumed yum as package manager for systems that have dnf --- .../roles/ec2_elb_instance_setup/tasks/main.yml | 7 ++++++- .../roles/setup_postgresql_db/tasks/main.yml | 8 ++++---- test/integration/roles/test_apt/tasks/main.yml | 1 - .../test_docker/tasks/docker-setup-rht.yml | 17 ++++++++--------- .../roles/test_unarchive/tasks/main.yml | 4 ++++ test/integration/roles/test_yum/tasks/main.yml | 2 ++ 6 files changed, 24 insertions(+), 15 deletions(-) diff --git a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml index 341392b00c..79584893ed 100644 --- a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml +++ b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml @@ -5,7 +5,12 @@ # install apache on the ec2 instances - name: install apache on new ec2 instances - yum: name=httpd + package: name=httpd + when: ansible_os_family == 'RedHat' + +- name: install apache on new ec2 instances + package: name=apache + when: ansible_os_family == 'Debian' - name: start and enable apache service: name=httpd state=started enabled=yes diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index fbcc9cab72..c25318a2ad 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -9,9 +9,9 @@ # Make sure we start fresh - name: remove rpm dependencies for postgresql test - yum: name={{ item }} state=absent + package: name={{ item }} state=absent with_items: postgresql_packages - when: ansible_pkg_mgr == 'yum' + when: ansible_os_family == "RedHat" - name: remove dpkg dependencies for postgresql test apt: name={{ item }} state=absent @@ -35,9 +35,9 
@@ when: ansible_os_family == "Debian" - name: install rpm dependencies for postgresql test - yum: name={{ item }} state=latest + package: name={{ item }} state=latest with_items: postgresql_packages - when: ansible_pkg_mgr == 'yum' + when: ansible_os_family == "RedHat" - name: install dpkg dependencies for postgresql test apt: name={{ item }} state=latest diff --git a/test/integration/roles/test_apt/tasks/main.yml b/test/integration/roles/test_apt/tasks/main.yml index 8976087371..552b543d2d 100644 --- a/test/integration/roles/test_apt/tasks/main.yml +++ b/test/integration/roles/test_apt/tasks/main.yml @@ -1,4 +1,3 @@ -# test code for the yum module # (c) 2014, James Tanner # This file is part of Ansible diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml index 3ba234ecff..c25821c3be 100644 --- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml +++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml @@ -1,18 +1,17 @@ -- name: Install docker packages (yum) - yum: +- name: Install docker packages (rht family) + package: state: present name: docker-io,docker-registry,python-docker-py,nginx -- name: Install netcat - yum: +- name: Install netcat (Fedora) + package: state: present name: nmap-ncat - # RHEL7 as well... 
- when: ansible_distribution == 'Fedora' + when: ansible_distribution == 'Fedora' or (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('>=', 7)) -- name: Install netcat - yum: +- name: Install netcat (RHEL) + package: state: present name: nc - when: ansible_distribution != 'Fedora' + when: ansible_distribution != 'Fedora' and (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('<', 7)) diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index c26d3aeb10..e4f438e525 100644 --- a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -21,6 +21,10 @@ yum: name=zip state=latest when: ansible_pkg_mgr == 'yum' +- name: Ensure zip is present to create test archive (dnf) + dnf: name=zip state=latest + when: ansible_pkg_mgr == 'dnf' + - name: Ensure zip is present to create test archive (apt) apt: name=zip state=latest when: ansible_pkg_mgr == 'apt' diff --git a/test/integration/roles/test_yum/tasks/main.yml b/test/integration/roles/test_yum/tasks/main.yml index 5df887ae9f..b17af6b465 100644 --- a/test/integration/roles/test_yum/tasks/main.yml +++ b/test/integration/roles/test_yum/tasks/main.yml @@ -16,6 +16,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Note: We install the yum package onto Fedora so that this will work on dnf systems +# We want to test that for people who don't want to upgrade their systems. 
- include: 'yum.yml' when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] From 5fef2c429763db8d088a20c97320936ee06e7fc8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 09:11:53 -0800 Subject: [PATCH 365/590] Try updating the centos7 image to a newer version (trying to resolve issue being unable to connect to some webservers) --- test/utils/ansible-playbook_integration_runner/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 27c4ae51b0..f1bd26b7ea 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -19,7 +19,7 @@ platform: "centos-6.5-x86_64" - distribution: "CentOS" version: "7" - image: "ami-96a818fe" + image: "ami-61bbf104" ssh_user: "centos" platform: "centos-7-x86_64" - distribution: "Fedora" From 6ae04c1e4f698629610030a74f5bb5fc501f5a1e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 20 Dec 2015 12:37:24 -0500 Subject: [PATCH 366/590] Fix logic in PlayIterator when inserting tasks during rescue/always Because the fail_state is potentially non-zero in these block sections, the prior logic led to included tasks not being inserted at all. 
Related issue: #13605 --- lib/ansible/executor/play_iterator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 795eed2a8c..534f216c30 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -397,7 +397,7 @@ class PlayIterator: def _insert_tasks_into_state(self, state, task_list): # if we've failed at all, or if the task list is empty, just return the current state - if state.fail_state != self.FAILED_NONE or not task_list: + if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list: return state if state.run_state == self.ITERATING_TASKS: From 8d7892cc7b7a95c4efda003c8b187d1bc4875a5f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 10:13:33 -0800 Subject: [PATCH 367/590] Done troubleshooting Revert "Troubleshooting has reduced us to this" This reverts commit 9abef1a1d7e8df5e580e17ef4a54cec280fbc7dc. 
--- test/integration/roles/test_get_url/tasks/main.yml | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 54debc06d1..cbf3b345f1 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -96,22 +96,12 @@ register: get_url_result ignore_errors: True -- name: TROUBLESHOOTING - shell: curl https://foo.sni.velox.ch/ > /var/tmp/velox.html - register: trouble - ignore_errors: True - when: "{{ python_has_ssl_context }}" - -- debug: var=trouble - when: "{{ python_has_ssl_context }}" - -- debug: var=get_url_result - when: "{{ python_has_ssl_context }}" - - command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" +# If distros start backporting SNI, can make a new conditional based on whether this works: +# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: From 3792a586b51ce598ab71bfab004a4bd97f004101 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 11:33:42 -0800 Subject: [PATCH 368/590] Since the velox test server seems to be dropping using iptables to drop requests from aws, test via a different website instead --- .../roles/test_get_url/tasks/main.yml | 45 +++++++++++++++---- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index cbf3b345f1..a0ff3797a8 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -85,23 +85,51 @@ - "result.changed == true" - "stat_result.stat.exists == true" -# SNI Tests -# SNI is only built into the stdlib from python-2.7.9 onwards +# At the moment, AWS can't 
make an https request to velox.ch... connection +# timed out. So we'll use a different test until/unless the problem is resolved +## SNI Tests +## SNI is only built into the stdlib from python-2.7.9 onwards +#- name: Test that SNI works +# get_url: +# # A test site that returns a page with information on what SNI information +# # the client sent. A failure would have the string: did not send a TLS server name indication extension +# url: 'https://foo.sni.velox.ch/' +# dest: "{{ output_dir }}/sni.html" +# register: get_url_result +# ignore_errors: True +# +#- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" +# register: data_result +# when: "{{ python_has_ssl_context }}" +# +#- debug: var=get_url_result +#- name: Assert that SNI works with this python version +# assert: +# that: +# - 'data_result.rc == 0' +# - '"failed" not in get_url_result' +# when: "{{ python_has_ssl_context }}" +# +## If the client doesn't support SNI then get_url should have failed with a certificate mismatch +#- name: Assert that hostname verification failed because SNI is not supported on this version of python +# assert: +# that: +# - 'get_url_result["failed"]' +# when: "{{ not python_has_ssl_context }}" + +# These tests are just side effects of how the site is hosted. It's not +# specifically a test site. So the tests may break due to the hosting changing - name: Test that SNI works get_url: - # A test site that returns a page with information on what SNI information - # the client sent. A failure would have the string: did not send a TLS server name indication extension - url: 'https://foo.sni.velox.ch/' + url: 'https://www.mnot.net/blog/2014/05/09/if_you_can_read_this_youre_sniing' dest: "{{ output_dir }}/sni.html" register: get_url_result ignore_errors: True -- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" +- command: "grep '

If You Can Read This, You're SNIing

' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" -# If distros start backporting SNI, can make a new conditional based on whether this works: -# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: @@ -116,3 +144,4 @@ that: - 'get_url_result["failed"]' when: "{{ not python_has_ssl_context }}" +# End hacky SNI test section From 21ca0ce1ce12eb4e487d479abdc355972d2c2309 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 11:46:49 -0800 Subject: [PATCH 369/590] Fix test playbook syntax --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index a0ff3797a8..630287c987 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -126,7 +126,7 @@ register: get_url_result ignore_errors: True -- command: "grep '

If You Can Read This, You're SNIing

' {{ output_dir}}/sni.html" +- command: "grep '

If You Can Read This, You\\'re SNIing

' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" From 6963955cb4a607c8548669136cb266c25d9f9ceb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 11:51:32 -0800 Subject: [PATCH 370/590] And change the task a little more since different shlex versions are handling the quotes differently --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 630287c987..9ed0549ec4 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -126,7 +126,7 @@ register: get_url_result ignore_errors: True -- command: "grep '

If You Can Read This, You\\'re SNIing

' {{ output_dir}}/sni.html" +- command: "grep '

If You Can Read This, You.re SNIing

' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" From b85b92ecdd03429fd84d384a495fbb5894da9ab0 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 14 Dec 2015 14:23:44 +0100 Subject: [PATCH 371/590] cloudstack: test_cs_instance: more integration tests cloudstack: extend test_cs_instance addressing recovering cloudstack: test_cs_instance: add tests for using display_name as indentifier. --- .../roles/test_cs_instance/tasks/absent.yml | 20 ++ .../tasks/absent_display_name.yml | 43 +++++ .../roles/test_cs_instance/tasks/cleanup.yml | 6 - .../roles/test_cs_instance/tasks/main.yml | 5 + .../roles/test_cs_instance/tasks/present.yml | 37 +++- .../tasks/present_display_name.yml | 176 ++++++++++++++++++ .../roles/test_cs_instance/tasks/setup.yml | 8 - 7 files changed, 272 insertions(+), 23 deletions(-) create mode 100644 test/integration/roles/test_cs_instance/tasks/absent_display_name.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/present_display_name.yml diff --git a/test/integration/roles/test_cs_instance/tasks/absent.yml b/test/integration/roles/test_cs_instance/tasks/absent.yml index bafb3ec9e7..eeab47a61d 100644 --- a/test/integration/roles/test_cs_instance/tasks/absent.yml +++ b/test/integration/roles/test_cs_instance/tasks/absent.yml @@ -21,3 +21,23 @@ that: - instance|success - not instance|changed + +- name: test recover to stopped state and update a deleted instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + state: stopped + register: instance +- name: verify test recover to stopped state and update a deleted instance + assert: + that: + - instance|success + - instance|changed + - instance.state == "Stopped" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + +# force expunge, only works with admin permissions +- cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: 
expunged + failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml new file mode 100644 index 0000000000..35fa6dff34 --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml @@ -0,0 +1,43 @@ +--- +- name: test destroy instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.state == "Destroyed" + +- name: test destroy instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + +- name: test recover to stopped state and update a deleted instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + state: stopped + register: instance +- name: verify test recover to stopped state and update a deleted instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.state == "Stopped" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + +# force expunge, only works with admin permissions +- cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: expunged + failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/cleanup.yml b/test/integration/roles/test_cs_instance/tasks/cleanup.yml index 63192dbd60..e6b6550dfa 100644 --- a/test/integration/roles/test_cs_instance/tasks/cleanup.yml +++ b/test/integration/roles/test_cs_instance/tasks/cleanup.yml @@ -28,9 +28,3 @@ assert: that: - 
sg|success - -# force expunge, only works with admin permissions -- cs_instance: - name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - state: expunged - failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/main.yml b/test/integration/roles/test_cs_instance/tasks/main.yml index d1a67e1781..d6475a4766 100644 --- a/test/integration/roles/test_cs_instance/tasks/main.yml +++ b/test/integration/roles/test_cs_instance/tasks/main.yml @@ -4,3 +4,8 @@ - include: tags.yml - include: absent.yml - include: cleanup.yml + +- include: setup.yml +- include: present_display_name.yml +- include: absent_display_name.yml +- include: cleanup.yml diff --git a/test/integration/roles/test_cs_instance/tasks/present.yml b/test/integration/roles/test_cs_instance/tasks/present.yml index 10242a57fd..ad3d391ef9 100644 --- a/test/integration/roles/test_cs_instance/tasks/present.yml +++ b/test/integration/roles/test_cs_instance/tasks/present.yml @@ -1,4 +1,12 @@ --- +- name: setup instance to be absent + cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent + register: instance +- name: verify instance to be absent + assert: + that: + - instance|success + - name: test create instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -21,7 +29,6 @@ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" - not instance.tags - - name: test create instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -44,7 +51,6 @@ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" - not instance.tags - - name: test running instance not updated cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -60,7 +66,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Running" - - name: test stopping instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -76,7 +81,6 @@ - instance.service_offering == "{{ 
test_cs_instance_offering_1 }}" - instance.state == "Stopped" - - name: test stopping instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -89,7 +93,6 @@ - not instance|changed - instance.state == "Stopped" - - name: test updating stopped instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -106,7 +109,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Stopped" - - name: test starting instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -122,7 +124,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Running" - - name: test starting instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -133,6 +134,9 @@ that: - instance|success - not instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Running" - name: test force update running instance @@ -147,7 +151,7 @@ - instance|success - instance|changed - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Running" @@ -163,6 +167,21 @@ - instance|success - not instance|changed - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Running" + +- name: test restore 
instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + state: restored + register: instance +- name: verify restore instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" diff --git a/test/integration/roles/test_cs_instance/tasks/present_display_name.yml b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml new file mode 100644 index 0000000000..c1882149d9 --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml @@ -0,0 +1,176 @@ +--- +- name: setup instance with display_name to be absent + cs_instance: display_name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent + register: instance +- name: verify instance with display_name to be absent + assert: + that: + - instance|success + +- name: test create instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" + - not instance.tags + +- name: test create instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + 
template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" + - not instance.tags + +- name: test running instance with display_name not updated + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_2 }}" + register: instance +- name: verify running instance with display_name not updated + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test stopping instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Stopped" + +- name: test stopping instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.state == "Stopped" + +- name: 
test updating stopped instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_2 }}" + register: instance +- name: verify updating stopped instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Stopped" + +- name: test starting instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: started + register: instance +- name: verify starting instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Running" + +- name: test starting instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: started + register: instance +- name: verify starting instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Running" + +- name: test force update running instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + force: true + register: instance +- name: verify force update running instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - 
instance.state == "Running" + +- name: test force update running instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + force: true + register: instance +- name: verify force update running instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test restore instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + state: restored + register: instance +- name: verify restore instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" diff --git a/test/integration/roles/test_cs_instance/tasks/setup.yml b/test/integration/roles/test_cs_instance/tasks/setup.yml index 32f3ff13e2..0039ce8f1b 100644 --- a/test/integration/roles/test_cs_instance/tasks/setup.yml +++ b/test/integration/roles/test_cs_instance/tasks/setup.yml @@ -22,11 +22,3 @@ assert: that: - sg|success - -- name: setup instance to be absent - cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent - register: instance -- name: verify instance to be absent - assert: - that: - - instance|success From 3a57d9472c6788ce6fbb700108fbc776527fc3df Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 20 Dec 2015 17:55:39 -0500 Subject: [PATCH 372/590] Save output of integration test results to files we can archive --- .../roles/run_integration/tasks/main.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git 
a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 2d01999dbf..f67f088246 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -28,4 +28,14 @@ delay: 30 ignore_errors: true -- debug: var=test_results +- name: save stdout test results for each host + local_action: copy + args: + dest: "{{sync_dir}}/{{inventory_hostname}}.stdout_results.txt" + content: "{{test_results.stdout}}" + +- name: save stderr test results for each host + local_action: copy + args: + dest: "{{sync_dir}}/{{inventory_hostname}}.stderr_results.txt" + content: "{{test_results.stderr}}" From 54455a06e55756b31493fd25b1871146c8fe6ab2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 20 Dec 2015 21:32:37 -0500 Subject: [PATCH 373/590] Disable docker test for Fedora, due to broken packaging --- test/integration/destructive.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 626124d14f..3e8cca385e 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -17,5 +17,5 @@ - { role: test_mysql_db, tags: test_mysql_db} - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} - - { role: test_docker, tags: test_docker} + - { role: test_docker, tags: test_docker, when: ansible_distribution != "Fedora" } - { role: test_zypper, tags: test_zypper} From a4674906c60da6035345c2bbe89983b5a6e3b69d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Mon, 21 Dec 2015 13:01:58 -0500 Subject: [PATCH 374/590] Merge role params into variables separately from other variables Fixes #13617 --- lib/ansible/playbook/role/__init__.py | 6 ++++++ lib/ansible/vars/__init__.py | 
1 + 2 files changed, 7 insertions(+) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index f308954f52..ce82573dc0 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -265,6 +265,12 @@ class Role(Base, Become, Conditional, Taggable): inherited_vars = combine_vars(inherited_vars, parent._role_params) return inherited_vars + def get_role_params(self): + params = {} + for dep in self.get_all_dependencies(): + params = combine_vars(params, dep._role_params) + return params + def get_vars(self, dep_chain=[], include_params=True): all_vars = self.get_inherited_vars(dep_chain, include_params=include_params) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 1184ec5049..699333a589 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -308,6 +308,7 @@ class VariableManager: if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): + all_vars = combine_vars(all_vars, role.get_role_params()) all_vars = combine_vars(all_vars, role.get_vars(include_params=False)) if task: From 593d80c63d408012550850eb06d85387588cee3b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 21 Dec 2015 13:14:51 -0500 Subject: [PATCH 375/590] role search path clarified --- docsite/rst/playbooks_roles.rst | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index c6c01db5d4..2e1173acda 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -191,11 +191,8 @@ This designates the following behaviors, for each role 'x': - If roles/x/handlers/main.yml exists, handlers listed therein will be added to the play - If roles/x/vars/main.yml exists, variables listed therein will be added to the play - If roles/x/meta/main.yml exists, any role dependencies listed therein will be added to the list of roles (1.3 and later) -- Any copy tasks can reference 
files in roles/x/files/ without having to path them relatively or absolutely -- Any script tasks can reference scripts in roles/x/files/ without having to path them relatively or absolutely -- Any template tasks can reference files in roles/x/templates/ without having to path them relatively or absolutely -- Any include tasks can reference files in roles/x/tasks/ without having to path them relatively or absolutely - +- Any copy, script, template or include tasks (in the role) can reference files in roles/x/files/ without having to path them relatively or absolutely + In Ansible 1.4 and later you can configure a roles_path to search for roles. Use this to check all of your common roles out to one location, and share them easily between multiple playbook projects. See :doc:`intro_configuration` for details about how to set this up in ansible.cfg. From 75e94e0cba538c9ed532374b219c45e91fd89db8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 21 Dec 2015 13:06:48 -0500 Subject: [PATCH 376/590] allow for non standard hostnames * Changed parse_addresses to throw exceptions instead of passing None * Switched callers to trap and pass through the original values. 
* Added very verbose notice * Look at deprecating this and possibly validate at plugin instead fixes #13608 --- lib/ansible/inventory/__init__.py | 21 ++++++++++++--------- lib/ansible/inventory/ini.py | 11 +++++++---- lib/ansible/parsing/utils/addresses.py | 22 +++++++++++----------- lib/ansible/plugins/action/add_host.py | 10 +++++++--- test/units/parsing/test_addresses.py | 14 ++++++++++++-- 5 files changed, 49 insertions(+), 29 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 95e193f381..095118e50e 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -109,7 +109,12 @@ class Inventory(object): pass elif isinstance(host_list, list): for h in host_list: - (host, port) = parse_address(h, allow_ranges=False) + try: + (host, port) = parse_address(h, allow_ranges=False) + except AnsibleError as e: + display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_string(e)) + host = h + port = None all.add_host(Host(host, port)) elif self._loader.path_exists(host_list): #TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory pllugins' @@ -228,15 +233,13 @@ class Inventory(object): # If it doesn't, it could still be a single pattern. This accounts for # non-separator uses of colons: IPv6 addresses and [x:y] host ranges. else: - (base, port) = parse_address(pattern, allow_ranges=True) - if base: + try: + (base, port) = parse_address(pattern, allow_ranges=True) patterns = [pattern] - - # The only other case we accept is a ':'-separated list of patterns. - # This mishandles IPv6 addresses, and is retained only for backwards - # compatibility. - - else: + except: + # The only other case we accept is a ':'-separated list of patterns. + # This mishandles IPv6 addresses, and is retained only for backwards + # compatibility. 
patterns = re.findall( r'''(?: # We want to match something comprising: [^\s:\[\]] # (anything other than whitespace or ':[]' diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 537fde1ef9..9224ef2d23 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -23,7 +23,7 @@ import ast import re from ansible import constants as C -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleParserError from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range @@ -264,9 +264,12 @@ class InventoryParser(object): # Can the given hostpattern be parsed as a host with an optional port # specification? - (pattern, port) = parse_address(hostpattern, allow_ranges=True) - if not pattern: - self._raise_error("Can't parse '%s' as host[:port]" % hostpattern) + try: + (pattern, port) = parse_address(hostpattern, allow_ranges=True) + except: + # not a recognizable host pattern + pattern = hostpattern + port = None # Once we have separated the pattern, we expand it into list of one or # more hostnames, depending on whether it contains any [x:y] ranges. diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py index 387f05c627..ebfd850ac6 100644 --- a/lib/ansible/parsing/utils/addresses.py +++ b/lib/ansible/parsing/utils/addresses.py @@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re +from ansible.errors import AnsibleParserError, AnsibleError # Components that match a numeric or alphanumeric begin:end or begin:end:step # range expression inside square brackets. @@ -162,6 +163,7 @@ patterns = { $ '''.format(label=label), re.X|re.I|re.UNICODE ), + } def parse_address(address, allow_ranges=False): @@ -183,8 +185,8 @@ def parse_address(address, allow_ranges=False): # First, we extract the port number if one is specified. 
port = None - for type in ['bracketed_hostport', 'hostport']: - m = patterns[type].match(address) + for matching in ['bracketed_hostport', 'hostport']: + m = patterns[matching].match(address) if m: (address, port) = m.groups() port = int(port) @@ -194,22 +196,20 @@ def parse_address(address, allow_ranges=False): # numeric ranges, or a hostname with alphanumeric ranges. host = None - for type in ['ipv4', 'ipv6', 'hostname']: - m = patterns[type].match(address) + for matching in ['ipv4', 'ipv6', 'hostname']: + m = patterns[matching].match(address) if m: host = address continue # If it isn't any of the above, we don't understand it. - if not host: - return (None, None) - - # If we get to this point, we know that any included ranges are valid. If - # the caller is prepared to handle them, all is well. Otherwise we treat - # it as a parse failure. + raise AnsibleError("Not a valid network hostname: %s" % address) + # If we get to this point, we know that any included ranges are valid. + # If the caller is prepared to handle them, all is well. + # Otherwise we treat it as a parse failure. 
if not allow_ranges and '[' in host: - return (None, None) + raise AnsibleParserError("Detected range in host but was asked to ignore ranges") return (host, port) diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py index 4bf43f1400..b3aec20437 100644 --- a/lib/ansible/plugins/action/add_host.py +++ b/lib/ansible/plugins/action/add_host.py @@ -53,9 +53,13 @@ class ActionModule(ActionBase): new_name = self._task.args.get('name', self._task.args.get('hostname', None)) display.vv("creating host via 'add_host': hostname=%s" % new_name) - name, port = parse_address(new_name, allow_ranges=False) - if not name: - raise AnsibleError("Invalid inventory hostname: %s" % new_name) + try: + name, port = parse_address(new_name, allow_ranges=False) + except: + # not a parsable hostname, but might still be usable + name = new_name + port = None + if port: self._task.args['ansible_ssh_port'] = port diff --git a/test/units/parsing/test_addresses.py b/test/units/parsing/test_addresses.py index 870cbb0a14..a688d0253b 100644 --- a/test/units/parsing/test_addresses.py +++ b/test/units/parsing/test_addresses.py @@ -71,7 +71,12 @@ class TestParseAddress(unittest.TestCase): for t in self.tests: test = self.tests[t] - (host, port) = parse_address(t) + try: + (host, port) = parse_address(t) + except: + host = None + port = None + assert host == test[0] assert port == test[1] @@ -79,6 +84,11 @@ class TestParseAddress(unittest.TestCase): for t in self.range_tests: test = self.range_tests[t] - (host, port) = parse_address(t, allow_ranges=True) + try: + (host, port) = parse_address(t, allow_ranges=True) + except: + host = None + port = None + assert host == test[0] assert port == test[1] From 08b580decce79deac3c7c2d828d6a8ef9dd6e70c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 14:09:02 -0500 Subject: [PATCH 377/590] Parallelize make command for integration test runner Also adds a new var, used by the prepare_tests role, to prevent 
it from deleting the temp test directory at the start of each play to avoid any potential race conditions --- test/integration/roles/prepare_tests/tasks/main.yml | 1 + .../roles/run_integration/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/prepare_tests/tasks/main.yml b/test/integration/roles/prepare_tests/tasks/main.yml index 3641880baa..7983ea5236 100644 --- a/test/integration/roles/prepare_tests/tasks/main.yml +++ b/test/integration/roles/prepare_tests/tasks/main.yml @@ -22,6 +22,7 @@ always_run: True tags: - prepare + when: clean_working_dir|default("yes")|bool - name: create the test directory file: name={{output_dir}} state=directory diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index f67f088246..8a306a8ada 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -9,7 +9,7 @@ shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" +- shell: "ls -la && . 
hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j4 {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 From 6d6822e66e43658c01b68bab2ed897e0ef31c784 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 14:37:17 -0500 Subject: [PATCH 378/590] Kick up the integration runner test image size --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index d4740d9570..55619776d9 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.medium' + instance_type: 'm3.large' image: '{{ item.image }}' wait: true region: 'us-east-1' From 45afa642c3a69d209fefd7debfb38df9d8b757fd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 15:48:58 -0500 Subject: [PATCH 379/590] Integration test runner tweaks --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- .../roles/run_integration/tasks/main.yml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 55619776d9..8a48f0ce6e 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.large' + instance_type: 'm3.xlarge' image: '{{ item.image }}' wait: true region: 'us-east-1' diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 8a306a8ada..6b37d85c2e 
100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -3,13 +3,14 @@ synchronize: src: "{{ sync_dir }}/" dest: "~/ansible" + no_log: true - name: Get ansible source dir sudo: false shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j4 {{ run_integration_make_target }}" +- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j2 {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 @@ -27,6 +28,7 @@ retries: 120 delay: 30 ignore_errors: true + no_log: true - name: save stdout test results for each host local_action: copy From 8119ea37afe5e94a1d98cec9fe7ae760b10a9adc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 15:55:16 -0500 Subject: [PATCH 380/590] Dropping instance size back down since we're not doing parallel builds --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 8a48f0ce6e..55619776d9 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.xlarge' + instance_type: 'm3.large' image: '{{ item.image }}' wait: true region: 'us-east-1' From d22bbbf52c08e03b63d6045768f3000531f875e9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 16:11:53 -0500 Subject: [PATCH 381/590] Actually disable parallel makes for integration runner --- .../roles/run_integration/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 6b37d85c2e..a833c96558 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -10,7 +10,7 @@ shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j2 {{ run_integration_make_target }}" +- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 From 0c013f592a31c06baac7aadf27d23598f6abe931 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 21 Dec 2015 13:52:41 -0800 Subject: [PATCH 382/590] Transform the command we pass to subprocess into a byte string in _low_level-exec_command --- lib/ansible/plugins/action/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e9b18651d6..e88a55a15c 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -487,7 +487,8 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - + # We may need to revisit this later. 
+ cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd From bbdfaf052209242fbd262860aeda81e59d694243 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 00:24:35 -0500 Subject: [PATCH 383/590] move hostvars.vars to vars this fixes duplication under hostvars and exposes all vars in the vars dict which makes dynamic reference possible on 'non hostvars' --- lib/ansible/vars/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 699333a589..4135ff1768 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -259,8 +259,6 @@ class VariableManager: except KeyError: pass - all_vars['vars'] = all_vars.copy() - if play: all_vars = combine_vars(all_vars, play.get_vars()) @@ -343,6 +341,8 @@ class VariableManager: all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars) #VARIABLE_CACHE[cache_entry] = all_vars + if task or play: + all_vars['vars'] = all_vars.copy() debug("done with get_vars()") return all_vars From c60749c9222c8139042a0f4280d6622b209de550 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 22 Dec 2015 09:14:12 -0600 Subject: [PATCH 384/590] Also convert ints to bool for type=bool --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 62b8cadfd6..8a135b300f 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1274,7 +1274,7 @@ class AnsibleModule(object): if isinstance(value, bool): return value - if isinstance(value, basestring): + if isinstance(value, basestring) or isinstance(value, int): return self.boolean(value) raise TypeError('%s cannot be converted to a bool' % type(value)) From b310d0ce76c05bb7a7a47aa7b7537b9adc916171 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 
2015 07:22:44 -0800 Subject: [PATCH 385/590] Update the developing doc to modern method of specifying bool argspec values --- docsite/rst/developing_modules.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index fde4b5704b..39bfd9e3d9 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -247,7 +247,7 @@ And instantiating the module class like:: argument_spec = dict( state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), - enabled = dict(required=True, choices=BOOLEANS), + enabled = dict(required=True, type='bool'), something = dict(aliases=['whatever']) ) ) @@ -335,7 +335,7 @@ and guidelines: * If you have a company module that returns facts specific to your installations, a good name for this module is `site_facts`. -* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "choices=BOOLEANS" and a module.boolean(value) casting function. +* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'" and a module.boolean(value) casting function. * Include a minimum of dependencies if possible. If there are dependencies, document them at the top of the module file, and have the module raise JSON error messages when the import fails. 
From b33f72636a3b7f3a256185afde1aae3d9703235e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 07:25:50 -0800 Subject: [PATCH 386/590] Also remove the bool casting function info (transparent to module writer now) --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 39bfd9e3d9..141f81bd08 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -335,7 +335,7 @@ and guidelines: * If you have a company module that returns facts specific to your installations, a good name for this module is `site_facts`. -* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'" and a module.boolean(value) casting function. +* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'". * Include a minimum of dependencies if possible. If there are dependencies, document them at the top of the module file, and have the module raise JSON error messages when the import fails. From c4da5840b5e38aea1740e68f7100256c93dfbb17 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 08:22:02 -0800 Subject: [PATCH 387/590] Convert to bytes later so that make_become_command can jsut operate on text type. --- lib/ansible/plugins/action/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e88a55a15c..765ba66316 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -487,8 +487,6 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. 
May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - # We may need to revisit this later. - cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd @@ -505,7 +503,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): cmd = self._play_context.make_become_cmd(cmd, executable=executable) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) - rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) + rc, stdout, stderr = self._connection.exec_command(to_bytes(cmd, errors='strict'), in_data=in_data, sudoable=sudoable) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type From b22d998d1d9acbda6f458ea99d7e5266d69e035c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Tue, 22 Dec 2015 16:30:29 +0100 Subject: [PATCH 388/590] Fix make tests-py3 on devel. Fix for https://github.com/ansible/ansible/issues/13638. 
--- test/units/plugins/action/test_action.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 0e47b6a538..dcd0437595 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -42,14 +42,14 @@ class TestActionBase(unittest.TestCase): play_context.become = True play_context.become_user = play_context.remote_user = 'root' - play_context.make_become_cmd = Mock(return_value='CMD') + play_context.make_become_cmd = Mock(return_value=b'CMD') - action_base._low_level_execute_command('ECHO', sudoable=True) + action_base._low_level_execute_command(b'ECHO', sudoable=True) play_context.make_become_cmd.assert_not_called() play_context.remote_user = 'apo' - action_base._low_level_execute_command('ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None) + action_base._low_level_execute_command(b'ECHO', sudoable=True) + play_context.make_become_cmd.assert_called_once_with(b'ECHO', executable=None) play_context.make_become_cmd.reset_mock() @@ -57,7 +57,7 @@ class TestActionBase(unittest.TestCase): C.BECOME_ALLOW_SAME_USER = True try: play_context.remote_user = 'root' - action_base._low_level_execute_command('ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None) + action_base._low_level_execute_command(b'ECHO SAME', sudoable=True) + play_context.make_become_cmd.assert_called_once_with(b'ECHO SAME', executable=None) finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 010839aedc5d903b7ef2fac1b564642cd036e95e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 17:15:58 -0500 Subject: [PATCH 389/590] fix no_log disclosure when using aliases --- lib/ansible/module_utils/basic.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/ansible/module_utils/basic.py 
b/lib/ansible/module_utils/basic.py index 4aee3b4169..91ea874d85 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -516,6 +516,7 @@ class AnsibleModule(object): self._debug = False self.aliases = {} + self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug'] if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.items(): @@ -524,6 +525,14 @@ class AnsibleModule(object): self.params = self._load_params() + # append to legal_inputs and then possibly check against them + try: + self.aliases = self._handle_aliases() + except Exception, e: + # use exceptions here cause its not safe to call vail json until no_log is processed + print('{"failed": true, "msg": "Module alias error: %s"}' % str(e)) + sys.exit(1) + # Save parameter values that should never be logged self.no_log_values = set() # Use the argspec to determine which args are no_log @@ -538,10 +547,6 @@ class AnsibleModule(object): # reset to LANG=C if it's an invalid/unavailable locale self._check_locale() - self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug'] - - # append to legal_inputs and then possibly check against them - self.aliases = self._handle_aliases() self._check_arguments(check_invalid_arguments) @@ -1064,6 +1069,7 @@ class AnsibleModule(object): self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e) def _handle_aliases(self): + # this uses exceptions as it happens before we can safely call fail_json aliases_results = {} #alias:canon for (k,v) in self.argument_spec.items(): self._legal_inputs.append(k) @@ -1072,11 +1078,11 @@ class AnsibleModule(object): required = v.get('required', False) if default is not None and required: # not alias specific but this is a good place to check this - self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k) + raise Exception("internal error: required and default are mutually exclusive 
for %s" % k) if aliases is None: continue if type(aliases) != list: - self.fail_json(msg='internal error: aliases must be a list') + raise Exception('internal error: aliases must be a list') for alias in aliases: self._legal_inputs.append(alias) aliases_results[alias] = k From 202b92179d247e508fe4190edc28614b136a5b89 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 22:09:45 -0500 Subject: [PATCH 390/590] corrected role path search order the unfraking was matching roles in current dir as it always returns a full path, pushed to the bottom as match of last resort fixes #13645 --- lib/ansible/playbook/role/definition.py | 70 ++++++++++++------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py index 7e8f47e9be..0af49cec91 100644 --- a/lib/ansible/playbook/role/definition.py +++ b/lib/ansible/playbook/role/definition.py @@ -135,46 +135,44 @@ class RoleDefinition(Base, Become, Conditional, Taggable): append it to the default role path ''' - role_path = unfrackpath(role_name) + # we always start the search for roles in the base directory of the playbook + role_search_paths = [ + os.path.join(self._loader.get_basedir(), u'roles'), + self._loader.get_basedir(), + ] + # also search in the configured roles path + if C.DEFAULT_ROLES_PATH: + configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) + role_search_paths.extend(configured_paths) + + # finally, append the roles basedir, if it was set, so we can + # search relative to that directory for dependent roles + if self._role_basedir: + role_search_paths.append(self._role_basedir) + + # create a templar class to template the dependency names, in + # case they contain variables + if self._variable_manager is not None: + all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) + else: + all_vars = dict() + + templar = Templar(loader=self._loader, variables=all_vars) + role_name = 
templar.template(role_name) + + # now iterate through the possible paths and return the first one we find + for path in role_search_paths: + path = templar.template(path) + role_path = unfrackpath(os.path.join(path, role_name)) + if self._loader.path_exists(role_path): + return (role_name, role_path) + + # if not found elsewhere try to extract path from name + role_path = unfrackpath(role_name) if self._loader.path_exists(role_path): role_name = os.path.basename(role_name) return (role_name, role_path) - else: - # we always start the search for roles in the base directory of the playbook - role_search_paths = [ - os.path.join(self._loader.get_basedir(), u'roles'), - u'./roles', - self._loader.get_basedir(), - u'./' - ] - - # also search in the configured roles path - if C.DEFAULT_ROLES_PATH: - configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) - role_search_paths.extend(configured_paths) - - # finally, append the roles basedir, if it was set, so we can - # search relative to that directory for dependent roles - if self._role_basedir: - role_search_paths.append(self._role_basedir) - - # create a templar class to template the dependency names, in - # case they contain variables - if self._variable_manager is not None: - all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) - else: - all_vars = dict() - - templar = Templar(loader=self._loader, variables=all_vars) - role_name = templar.template(role_name) - - # now iterate through the possible paths and return the first one we find - for path in role_search_paths: - path = templar.template(path) - role_path = unfrackpath(os.path.join(path, role_name)) - if self._loader.path_exists(role_path): - return (role_name, role_path) raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds) From 957b376f9eb959f4f3627a622f7776a26442bf9c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 22:45:25 -0500 Subject: [PATCH 391/590] 
better module error handling * now module errors clearly state msg=MODULE FAILURE * module's stdout and stderr go into module_stdout and module_stderr keys which only appear during parsing failure * invocation module_args are deleted from results provided by action plugin as errors can keep us from overwriting and then disclosing info that was meant to be kept hidden due to no_log * fixed invocation module_args set by basic.py as it was creating different keys as the invocation in action plugin base. * results now merge --- lib/ansible/module_utils/basic.py | 4 ++-- lib/ansible/plugins/action/__init__.py | 5 +++-- lib/ansible/plugins/action/normal.py | 5 ++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 91ea874d85..0391035e88 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1530,7 +1530,7 @@ class AnsibleModule(object): if not 'changed' in kwargs: kwargs['changed'] = False if 'invocation' not in kwargs: - kwargs['invocation'] = self.params + kwargs['invocation'] = {'module_args': self.params} kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) @@ -1542,7 +1542,7 @@ class AnsibleModule(object): assert 'msg' in kwargs, "implementation error -- msg to explain the error is required" kwargs['failed'] = True if 'invocation' not in kwargs: - kwargs['invocation'] = self.params + kwargs['invocation'] = {'module_args': self.params} kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 765ba66316..5383f8afd4 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -460,9 +460,10 @@ class ActionBase(with_metaclass(ABCMeta, object)): if 'stderr' in res and res['stderr'].startswith(u'Traceback'): 
data['exception'] = res['stderr'] else: - data['msg'] = res.get('stdout', u'') + data['msg'] = "MODULE FAILURE" + data['module_stdout'] = res.get('stdout', u'') if 'stderr' in res: - data['msg'] += res['stderr'] + data['module_stderr'] = res['stderr'] # pre-split stdout into lines, if stdout is in the data and there # isn't already a stdout_lines value there diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index f9b55e1ff5..932ad8309c 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -18,6 +18,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase +from ansible.utils.vars import merge_hash class ActionModule(ActionBase): @@ -27,7 +28,9 @@ class ActionModule(ActionBase): task_vars = dict() results = super(ActionModule, self).run(tmp, task_vars) - results.update(self._execute_module(tmp=tmp, task_vars=task_vars)) + # remove as modules might hide due to nolog + del results['invocation']['module_args'] + results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars)) # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. From 809c9af68cac56180b336d6ebe29d70b9d10ac14 Mon Sep 17 00:00:00 2001 From: Matt Roberts Date: Wed, 23 Dec 2015 08:18:46 +0000 Subject: [PATCH 392/590] Update playbooks_intro.rst If you follow the documentation through in order you shouldn't have read about modules yet. --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 28c809f013..55cd3359be 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -41,7 +41,7 @@ Each playbook is composed of one or more 'plays' in a list. 
The goal of a play is to map a group of hosts to some well defined roles, represented by things ansible calls tasks. At a basic level, a task is nothing more than a call -to an ansible module, which you should have learned about in earlier chapters. +to an ansible module (see :doc:`Modules`). By composing a playbook of multiple 'plays', it is possible to orchestrate multi-machine deployments, running certain steps on all From 42b9a206ada579000a64cdcb7a0c82ecfd99c451 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 23 Dec 2015 11:44:30 +0100 Subject: [PATCH 393/590] Fix last commit, make it python3 compatible (and py24) --- lib/ansible/module_utils/basic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 91ea874d85..f9dc964e67 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -528,7 +528,8 @@ class AnsibleModule(object): # append to legal_inputs and then possibly check against them try: self.aliases = self._handle_aliases() - except Exception, e: + except Exception: + e = get_exception() # use exceptions here cause its not safe to call vail json until no_log is processed print('{"failed": true, "msg": "Module alias error: %s"}' % str(e)) sys.exit(1) From b201cf2ee13a9e4e1c5dc222043e3f1c84940044 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 23 Dec 2015 10:29:59 -0500 Subject: [PATCH 394/590] switched from pythonic None to generic null --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 141f81bd08..d3781b2f7f 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -481,7 +481,7 @@ Module checklist * The shebang should always be #!/usr/bin/python, this allows ansible_python_interpreter to work * Documentation: Make sure it exists * `required` should always 
be present, be it true or false - * If `required` is false you need to document `default`, even if the default is 'None' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. + * If `required` is false you need to document `default`, even if the default is 'null' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. * `default` is not needed for `required: true` * Remove unnecessary doc like `aliases: []` or `choices: []` * The version is not a float number and value the current development version From d89d7951e6fb84cdb04cc35e0aa962d59fe6f553 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 23 Dec 2015 11:45:07 -0500 Subject: [PATCH 395/590] fixed tests to follow new invocation structure also added maxdiff setting to see issues clearly when they happen --- .../module_utils/basic/test_exit_json.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py index 931447f8ab..27bbb0f9e5 100644 --- a/test/units/module_utils/basic/test_exit_json.py +++ b/test/units/module_utils/basic/test_exit_json.py @@ -31,8 +31,11 @@ from ansible.module_utils import basic from ansible.module_utils.basic import heuristic_log_sanitize from ansible.module_utils.basic import return_values, remove_values +empty_invocation = {u'module_args': {}} + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitJson(unittest.TestCase): + def setUp(self): self.COMPLEX_ARGS = basic.MODULE_COMPLEX_ARGS basic.MODULE_COMPLEX_ARGS = '{}' @@ -56,7 +59,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=False, invocation={})) + 
self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation)) def test_exit_json_args_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -67,7 +70,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", changed=False, invocation={})) + self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation)) def test_fail_json_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -78,13 +81,13 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 1) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", failed=True, invocation={})) + self.assertEquals(return_val, dict(msg="message", failed=True, invocation=empty_invocation)) def test_exit_json_proper_changed(self): with self.assertRaises(SystemExit) as ctx: self.module.exit_json(changed=True, msg='success') return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=True, msg='success', invocation={})) + self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation)) @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): @@ -95,21 +98,21 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), (dict(username='person', password='password12345'), dict(one=1, pwd='$ecret k3y', 
url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), (dict(username='person', password='$ecret k3y'), dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), ) @@ -122,6 +125,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): sys.stdout = self.old_stdout def test_exit_json_removes_values(self): + self.maxDiff = None for args, return_val, expected in self.dataset: sys.stdout = StringIO() basic.MODULE_COMPLEX_ARGS = json.dumps(args) @@ -137,6 +141,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): self.assertEquals(json.loads(sys.stdout.getvalue()), expected) def test_fail_json_removes_values(self): + self.maxDiff = None for args, return_val, expected in self.dataset: expected = copy.deepcopy(expected) del expected['changed'] From fd7e01696f659e1a147887087c87e2bad9742209 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 23 Dec 2015 17:16:21 -0500 Subject: [PATCH 396/590] updated submodule refs to pick up module changes --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fcb3397df7..002028748f 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 
fcb3397df7944ff15ea698b5717c06e8fc7d43ba +Subproject commit 002028748f080961ade801c30e194bfd4ba043ce diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index c6829752d8..19e496c69c 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit c6829752d852398c255704cd5d7faa54342e143e +Subproject commit 19e496c69c22fc7ec1e3c8306b363a812b85d386 From deac4d00b22f9e0288f5e3c4633e07a7f937d47c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 24 Dec 2015 11:32:40 -0800 Subject: [PATCH 397/590] bigip changes as requested by bcoca and abadger: * Fix to error if validate_cert is True and python doesn't support it. * Only globally disable certificate checking if really needed. Use bigip verify parameter if available instead. * Remove public disable certificate function to make it less likely people will attempt to reuse that --- lib/ansible/module_utils/f5.py | 36 ++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py index e04e6b2f1e..ba336377e7 100644 --- a/lib/ansible/module_utils/f5.py +++ b/lib/ansible/module_utils/f5.py @@ -51,19 +51,35 @@ def f5_argument_spec(): def f5_parse_arguments(module): if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") - if not module.params['validate_certs']: - disable_ssl_cert_validation() + + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. 
Either update python or set validate_certs=False on the task') + return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition'],module.params['validate_certs']) -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api +def bigip_api(bigip, user, password, validate_certs): + try: + # bigsuds >= 1.0.3 + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs) + except TypeError: + # bigsuds < 1.0.3, no verify param + if validate_certs: + # Note: verified we have SSLContext when we parsed params + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + else: + import ssl + if hasattr(ssl, 'SSLContext'): + # Really, you should never do this. It disables certificate + # verification *globally*. But since older bigip libraries + # don't give us a way to toggle verification we need to + # disable it at the global level. + # From https://www.python.org/dev/peps/pep-0476/#id29 + ssl._create_default_https_context = ssl._create_unverified_context + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. - # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context + return api # Fully Qualified name (with the partition) def fq_name(partition,name): From cd9e18d0e52c1915132614e6e2946a26968e3091 Mon Sep 17 00:00:00 2001 From: Stephen Medina Date: Fri, 25 Dec 2015 08:56:08 -0800 Subject: [PATCH 398/590] clarify idempotence explanation Small typo; wasn't sure what to replace it with. 
--- docsite/rst/intro_adhoc.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 9e104d5836..61ba33523a 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -112,7 +112,7 @@ For example, using double rather than single quotes in the above example would evaluate the variable on the box you were on. So far we've been demoing simple command execution, but most Ansible modules usually do not work like -simple scripts. They make the remote system look like you state, and run the commands necessary to +simple scripts. They make the remote system look like a state, and run the commands necessary to get it there. This is commonly referred to as 'idempotence', and is a core design goal of Ansible. However, we also recognize that running arbitrary commands is equally important, so Ansible easily supports both. From d70a97b562da1b06d21a86fd1c7619bfa2b6a2e6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 25 Dec 2015 12:17:22 -0800 Subject: [PATCH 399/590] Update submodule refs --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 19e496c69c..f6a7b6dd1f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 19e496c69c22fc7ec1e3c8306b363a812b85d386 +Subproject commit f6a7b6dd1f7be93ba640c50bf26adeeabb5af46f From c489b271d152820ab11b73d11877f8805318cd7a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 27 Dec 2015 14:17:20 -0500 Subject: [PATCH 400/590] updated release cycle to 4 months instead of 2 --- docsite/rst/intro_installation.rst | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index e986ffd70f..a5ed83a302 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -27,12 
+27,11 @@ What Version To Pick? ````````````````````` Because it runs so easily from source and does not require any installation of software on remote -machines, many users will actually track the development version. +machines, many users will actually track the development version. -Ansible's release cycles are usually about two months long. Due to this -short release cycle, minor bugs will generally be fixed in the next release versus maintaining -backports on the stable branch. Major bugs will still have maintenance releases when needed, though -these are infrequent. +Ansible's release cycles are usually about four months long. Due to this short release cycle, +minor bugs will generally be fixed in the next release versus maintaining backports on the stable branch. +Major bugs will still have maintenance releases when needed, though these are infrequent. If you are wishing to run the latest released version of Ansible and you are running Red Hat Enterprise Linux (TM), CentOS, Fedora, Debian, or Ubuntu, we recommend using the OS package manager. From 20005660313b5abc4188704fc3a37a4c25f83e62 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 28 Dec 2015 10:24:28 -0500 Subject: [PATCH 401/590] minor fix to become docs --- docsite/rst/become.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 64628515c6..7597643f88 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -1,5 +1,5 @@ -Ansible Privilege Escalation -++++++++++++++++++++++++++++ +Become (Privilege Escalation) ++++++++++++++++++++++++++++++ Ansible can use existing privilege escalation systems to allow a user to execute tasks as another. 
@@ -7,17 +7,17 @@ Ansible can use existing privilege escalation systems to allow a user to execute Become `````` -Before 1.9 Ansible mostly allowed the use of sudo and a limited use of su to allow a login/remote user to become a different user -and execute tasks, create resources with the 2nd user's permissions. As of 1.9 'become' supersedes the old sudo/su, while still -being backwards compatible. This new system also makes it easier to add other privilege escalation tools like pbrun (Powerbroker), -pfexec and others. +Before 1.9 Ansible mostly allowed the use of `sudo` and a limited use of `su` to allow a login/remote user to become a different user +and execute tasks, create resources with the 2nd user's permissions. As of 1.9 `become` supersedes the old sudo/su, while still +being backwards compatible. This new system also makes it easier to add other privilege escalation tools like `pbrun` (Powerbroker), +`pfexec` and others. New directives -------------- become - equivalent to adding 'sudo:' or 'su:' to a play or task, set to 'true'/'yes' to activate privilege escalation + equivalent to adding `sudo:` or `su:` to a play or task, set to 'true'/'yes' to activate privilege escalation become_user equivalent to adding 'sudo_user:' or 'su_user:' to a play or task, set to user with desired privileges From 56454d6a9135fb18e5d0545b9162b940cbcb8a78 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 28 Dec 2015 12:25:27 -0500 Subject: [PATCH 402/590] added newer vars to 'reset_vars' these vars pass back info to the task about the connection moved to their own block at start at file for readability and added the newer standard vars --- lib/ansible/playbook/play_context.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 81223500ad..6b19f4c172 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -125,6 +125,18 @@ 
TASK_ATTRIBUTE_OVERRIDES = ( 'remote_user', ) +RESET_VARS = ( + 'ansible_connection', + 'ansible_ssh_host', + 'ansible_ssh_pass', + 'ansible_ssh_port', + 'ansible_ssh_user', + 'ansible_ssh_private_key_file', + 'ansible_ssh_pipelining', + 'ansible_user', + 'ansible_host', + 'ansible_port', +) class PlayContext(Base): @@ -505,7 +517,8 @@ class PlayContext(Base): # TODO: should we be setting the more generic values here rather than # the more specific _ssh_ ones? - for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file', 'ansible_ssh_pipelining']: + for special_var in RESET_VARS: + if special_var not in variables: for prop, varnames in MAGIC_VARIABLE_MAPPING.items(): if special_var in varnames: From 2d11cfab92f9d26448461b4bc81f466d1910a15e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 29 Dec 2015 11:40:18 -0500 Subject: [PATCH 403/590] Squashed commit of the following: commit 24efa310b58c431b4d888a6315d1285da918f670 Author: James Cammarata Date: Tue Dec 29 11:23:52 2015 -0500 Adding an additional test for copy exclusion Adds a negative test for the situation when an exclusion doesn't exist in the target to be copied. commit 643ba054877cf042177d65e6e2958178bdd2fe88 Merge: e6ee59f 66a8f7e Author: James Cammarata Date: Tue Dec 29 10:59:18 2015 -0500 Merge branch 'speedup' of https://github.com/chrismeyersfsu/ansible into chrismeyersfsu-speedup commit 66a8f7e873ca90f7848e47b04d9b62aed23a45df Author: Chris Meyers Date: Mon Dec 28 09:47:00 2015 -0500 better api and tests added * _copy_results = deepcopy for better performance * _copy_results_exclude to deepcopy but exclude certain fields. Pop fields that do not need to be deep copied. Re-assign popped fields after deep copy so we don't modify the original, to be copied, object. 
* _copy_results_exclude unit tests commit 93490960ff4e75f38a7cc6f6d49f10f949f1a7da Author: Chris Meyers Date: Fri Dec 25 23:17:26 2015 -0600 remove uneeded deepcopy fields --- lib/ansible/plugins/callback/__init__.py | 19 ++++- test/units/plugins/callback/test_callback.py | 82 ++++++++++++++++++++ 2 files changed, 97 insertions(+), 4 deletions(-) create mode 100644 test/units/plugins/callback/test_callback.py diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 7371fe0a51..cc2a9ad0e7 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -59,9 +59,20 @@ class CallbackBase: version = getattr(self, 'CALLBACK_VERSION', '1.0') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) - def _copy_result(self, result): - ''' helper for callbacks, so they don't all have to include deepcopy ''' - return deepcopy(result) + ''' helper for callbacks, so they don't all have to include deepcopy ''' + _copy_result = deepcopy + + def _copy_result_exclude(self, result, exclude): + values = [] + for e in exclude: + values.append(getattr(result, e)) + setattr(result, e, None) + + result_copy = deepcopy(result) + for i,e in enumerate(exclude): + setattr(result, e, values[i]) + + return result_copy def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): @@ -130,7 +141,7 @@ class CallbackBase: def _process_items(self, result): for res in result._result['results']: - newres = self._copy_result(result) + newres = self._copy_result_exclude(result, ['_result']) res['item'] = self._get_item(res) newres._result = res if 'failed' in res and res['failed']: diff --git a/test/units/plugins/callback/test_callback.py b/test/units/plugins/callback/test_callback.py new file mode 100644 index 0000000000..54964ac9df --- /dev/null +++ b/test/units/plugins/callback/test_callback.py @@ -0,0 +1,82 @@ +# (c) 
2012-2014, Chris Meyers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import PY3 +from copy import deepcopy + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, mock_open + +from ansible.plugins.callback import CallbackBase +import ansible.plugins.callback as callish + +class TestCopyResultExclude(unittest.TestCase): + def setUp(self): + class DummyClass(): + def __init__(self): + self.bar = [ 1, 2, 3 ] + self.a = { + "b": 2, + "c": 3, + } + self.b = { + "c": 3, + "d": 4, + } + self.foo = DummyClass() + self.cb = CallbackBase() + + def tearDown(self): + pass + + def test_copy_logic(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertEqual(self.foo.bar, res.bar) + + def test_copy_deep(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertNotEqual(id(self.foo.bar), id(res.bar)) + + def test_no_exclude(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertEqual(self.foo.bar, res.bar) + self.assertEqual(self.foo.a, res.a) + self.assertEqual(self.foo.b, res.b) + + def test_exclude(self): + res = self.cb._copy_result_exclude(self.foo, ['bar', 'b']) + self.assertIsNone(res.bar) + self.assertIsNone(res.b) + self.assertEqual(self.foo.a, res.a) + + def 
test_result_unmodified(self): + bar_id = id(self.foo.bar) + a_id = id(self.foo.a) + res = self.cb._copy_result_exclude(self.foo, ['bar', 'a']) + + self.assertEqual(self.foo.bar, [ 1, 2, 3 ]) + self.assertEqual(bar_id, id(self.foo.bar)) + + self.assertEqual(self.foo.a, dict(b=2, c=3)) + self.assertEqual(a_id, id(self.foo.a)) + + self.assertRaises(AttributeError, self.cb._copy_result_exclude, self.foo, ['a', 'c', 'bar']) + From d3deb24ead59d5fdbecad3c946848537f95772ad Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 29 Dec 2015 15:41:00 -0500 Subject: [PATCH 404/590] output color is now configurable --- examples/ansible.cfg | 11 ++++++ lib/ansible/cli/galaxy.py | 25 +++++++------- lib/ansible/constants.py | 11 ++++++ lib/ansible/executor/task_executor.py | 2 +- lib/ansible/playbook/__init__.py | 3 +- lib/ansible/plugins/callback/default.py | 46 ++++++++++++------------- lib/ansible/plugins/callback/minimal.py | 17 +++++---- lib/ansible/plugins/callback/oneline.py | 14 ++++---- lib/ansible/utils/color.py | 3 +- lib/ansible/utils/display.py | 14 ++++---- 10 files changed, 86 insertions(+), 60 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index ec3ddf2064..b357738b39 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -262,3 +262,14 @@ # the default behaviour that copies the existing context or uses the user default # needs to be changed to use the file system dependent context. 
#special_context_filesystems=nfs,vboxsf,fuse,ramfs + +[colors] +#verbose = blue +#warn = bright purple +#error = red +#debug = dark gray +#deprecate = purple +#skip = cyan +#unreachable = red +#ok = green +#changed = yellow diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 34afa03c9f..476a7d0f89 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -514,7 +514,7 @@ class GalaxyCLI(CLI): tags=self.options.tags, author=self.options.author, page_size=page_size) if response['count'] == 0: - display.display("No roles match your search.", color="yellow") + display.display("No roles match your search.", color=C.COLOR_ERROR) return True data = '' @@ -570,10 +570,10 @@ class GalaxyCLI(CLI): colors = { 'INFO': 'normal', - 'WARNING': 'yellow', - 'ERROR': 'red', - 'SUCCESS': 'green', - 'FAILED': 'red' + 'WARNING': C.COLOR_WARN, + 'ERROR': C.COLOR_ERROR, + 'SUCCESS': C.COLOR_OK, + 'FAILED': C.COLOR_ERROR, } if len(self.args) < 2: @@ -592,11 +592,10 @@ class GalaxyCLI(CLI): # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." 
% (github_user,github_repo), color='yellow') - display.display("The following Galaxy roles are being updated:" + u'\n', color='yellow') + display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED) for t in task: - display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color='yellow') - display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), - color='yellow') + display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) + display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED) return 0 # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) @@ -633,17 +632,17 @@ class GalaxyCLI(CLI): # None found display.display("No integrations found.") return 0 - display.display(u'\n' + "ID Source Repo", color="green") - display.display("---------- ---------- ----------", color="green") + display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK) + display.display("---------- ---------- ----------", color=C.COLOR_OK) for secret in secrets: display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], - secret['github_repo']),color="green") + secret['github_repo']),color=C.COLOR_OK) return 0 if self.options.remove_id: # Remove a secret self.api.remove_secret(self.options.remove_id) - display.display("Secret removed. Integrations using this secret will not longer work.", color="green") + display.display("Secret removed. 
Integrations using this secret will not longer work.", color=C.COLOR_OK) return 0 if len(self.args) < 4: diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 5df9602246..9b84825d6b 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -268,6 +268,17 @@ GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) +# colors +COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue') +COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple') +COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red') +COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray') +COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple') +COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan') +COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red') +COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green') +COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow') + # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] MODULE_NO_JSON = ['command', 'shell', 'raw'] diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index c8b6fa179b..4a2d30a2cd 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -393,7 +393,7 @@ class TaskExecutor: result = None for attempt in range(retries): if attempt > 0: - display.display("FAILED - RETRYING: %s (%d retries left). 
Result was: %s" % (self._task, retries-attempt, result), color="dark gray") + display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color=C.COLOR_DEBUG) result['attempts'] = attempt + 1 display.debug("running the handler") diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 0ae443f843..947224d61f 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -25,6 +25,7 @@ from ansible.errors import AnsibleParserError from ansible.playbook.play import Play from ansible.playbook.playbook_include import PlaybookInclude from ansible.plugins import get_all_plugin_loaders +from ansible import constants as C try: from __main__ import display @@ -87,7 +88,7 @@ class Playbook: if pb is not None: self._entries.extend(pb._entries) else: - display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan') + display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color=C.COLOR_SKIP) else: entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader) self._entries.append(entry_obj) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index e515945bba..421104ee83 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -44,7 +44,7 @@ class CallbackModule(CallbackBase): else: msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] @@ -53,12 +53,12 @@ class CallbackModule(CallbackBase): self._process_items(result) else: if delegated_vars: - self._display.display("fatal: [%s -> %s]: FAILED! 
=> %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) if result._task.ignore_errors: - self._display.display("...ignoring", color='cyan') + self._display.display("...ignoring", color=C.COLOR_SKIP) def v2_runner_on_ok(self, result): @@ -71,13 +71,13 @@ class CallbackModule(CallbackBase): msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' + color = C.COLOR_CHANGED else: if delegated_vars: msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "ok: [%s]" % result._host.get_name() - color = 'green' + color = C.COLOR_OK if result._task.loop and 'results' in result._result: self._process_items(result) @@ -97,17 +97,17 @@ class CallbackModule(CallbackBase): msg = "skipping: [%s]" % result._host.get_name() if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color='cyan') + self._display.display(msg, color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) def v2_playbook_on_no_hosts_matched(self): - self._display.display("skipping: no hosts matched", color='cyan') + self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP) def v2_playbook_on_no_hosts_remaining(self): self._display.banner("NO MORE HOSTS LEFT") @@ -117,7 +117,7 @@ class CallbackModule(CallbackBase): if self._display.verbosity > 2: path = task.get_path() if path: - self._display.display("task path: %s" % path, color='dark gray') + self._display.display("task path: %s" % path, color=C.COLOR_DEBUG) def v2_playbook_on_cleanup_task_start(self, task): self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) @@ -155,13 +155,13 @@ class CallbackModule(CallbackBase): msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' + color = C.COLOR_CHANGED else: if delegated_vars: msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "ok: [%s]" % result._host.get_name() - color = 'green' + color = C.COLOR_OK msg += " => (item=%s)" % (result._result['item'],) @@ -179,15 +179,15 @@ class CallbackModule(CallbackBase): else: msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] if delegated_vars: - self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red') + self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') + self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) self._handle_warnings(result._result) @@ -195,12 +195,12 @@ class CallbackModule(CallbackBase): msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item']) if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color='cyan') + self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_include(self, included_file): msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) - color = 'cyan' - self._display.display(msg, color='cyan') + color = C.COLOR_SKIP + self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_stats(self, stats): self._display.banner("PLAY RECAP") @@ -211,10 +211,10 @@ class CallbackModule(CallbackBase): self._display.display(u"%s : %s %s %s %s" % ( hostcolor(h, t), - colorize(u'ok', t['ok'], 'green'), 
- colorize(u'changed', t['changed'], 'yellow'), - colorize(u'unreachable', t['unreachable'], 'red'), - colorize(u'failed', t['failures'], 'red')), + colorize(u'ok', t['ok'], C.COLOR_OK), + colorize(u'changed', t['changed'], C.COLOR_CHANGED), + colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), + colorize(u'failed', t['failures'], C.COLOR_ERROR)), screen_only=True ) diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index 71f9f5dfee..9fa257af74 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -53,29 +53,32 @@ class CallbackModule(CallbackBase): else: msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color='red') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR) else: - self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red') + self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR) def v2_runner_on_ok(self, result): self._clean_results(result._result, result._task.action) if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color=C.COLOR_OK) else: - self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green') + if 'changed' in result._result and result._result['changed']: + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_CHANGED) + else: + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_OK) self._handle_warnings(result._result) def v2_runner_on_skipped(self, result): - self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') + self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): - self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='yellow') + self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE) def v2_on_file_diff(self, result): if 'diff' in result._result and result._result['diff']: diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py index a99b680c05..0f6283fd44 100644 --- a/lib/ansible/plugins/callback/oneline.py +++ b/lib/ansible/plugins/callback/oneline.py @@ -52,24 +52,24 @@ class CallbackModule(CallbackBase): msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'].replace('\n','') if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color=C.COLOR_ERROR) else: - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] - self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red') + self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR) def v2_runner_on_ok(self, result): if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK) else: - self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green') + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK) def v2_runner_on_unreachable(self, result): - self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow') + self._display.display("%s | UNREACHABLE!" 
% result._host.get_name(), color=C.COLOR_UNREACHABLE) def v2_runner_on_skipped(self, result): - self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') + self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP) diff --git a/lib/ansible/utils/color.py b/lib/ansible/utils/color.py index 55060ace04..81a05d749e 100644 --- a/lib/ansible/utils/color.py +++ b/lib/ansible/utils/color.py @@ -62,7 +62,8 @@ codeCodes = { 'purple': u'0;35', 'bright red': u'1;31', 'yellow': u'0;33', 'bright purple': u'1;35', 'dark gray': u'1;30', 'bright yellow': u'1;33', - 'normal': u'0' + 'magenta': u'0;35', 'bright magenta': u'1;35', + 'normal': u'0' , } def stringc(text, color): diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 3d51f17de4..8700a51018 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -145,7 +145,7 @@ class Display: # characters that are invalid in the user's locale msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr)) - if color == 'red': + if color == C.COLOR_ERROR: logger.error(msg2) else: logger.info(msg2) @@ -168,7 +168,7 @@ class Display: def debug(self, msg): if C.DEFAULT_DEBUG: debug_lock.acquire() - self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray') + self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG) debug_lock.release() def verbose(self, msg, host=None, caplevel=2): @@ -176,9 +176,9 @@ class Display: #msg = utils.sanitize_output(msg) if self.verbosity > caplevel: if host is None: - self.display(msg, color='blue') + self.display(msg, color=C.COLOR_VERBOSE) else: - self.display("<%s> %s" % (host, msg), color='blue', screen_only=True) + self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, screen_only=True) def deprecated(self, msg, version=None, removed=False): ''' used to print out a deprecation message.''' @@ -199,7 +199,7 @@ class Display: new_msg = "\n".join(wrapped) + "\n" 
if new_msg not in self._deprecations: - self.display(new_msg.strip(), color='purple', stderr=True) + self.display(new_msg.strip(), color=C.COLOR_DEPRECATE, stderr=True) self._deprecations[new_msg] = 1 def warning(self, msg): @@ -207,7 +207,7 @@ class Display: wrapped = textwrap.wrap(new_msg, self.columns) new_msg = "\n".join(wrapped) + "\n" if new_msg not in self._warns: - self.display(new_msg, color='bright purple', stderr=True) + self.display(new_msg, color=C.COLOR_WARN, stderr=True) self._warns[new_msg] = 1 def system_warning(self, msg): @@ -258,7 +258,7 @@ class Display: else: new_msg = msg if new_msg not in self._errors: - self.display(new_msg, color='red', stderr=True) + self.display(new_msg, color=C.COLOR_ERROR, stderr=True) self._errors[new_msg] = 1 @staticmethod From 5accc9858739d2184235bf8722b83ff7bcc97056 Mon Sep 17 00:00:00 2001 From: mgarstecki Date: Wed, 30 Dec 2015 11:57:12 +0100 Subject: [PATCH 405/590] Correction of a double negation The sentence seemed to imply that return codes from modules are significant, while they are not. The second part of the sentence confirms this, as it advises to use standard return codes only for future proofing. --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index d3781b2f7f..5d664d5631 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -347,7 +347,7 @@ and guidelines: * In the event of failure, a key of 'failed' should be included, along with a string explanation in 'msg'. Modules that raise tracebacks (stacktraces) are generally considered 'poor' modules, though Ansible can deal with these returns and will automatically convert anything unparseable into a failed result. If you are using the AnsibleModule common Python code, the 'failed' element will be included for you automatically when you call 'fail_json'. 
-* Return codes from modules are not actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing. +* Return codes from modules are actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing. * As results from many hosts will be aggregated at once, modules should return only relevant output. Returning the entire contents of a log file is generally bad form. From 946b82bef71d3b2d4ecf07ec937b650634bc84a0 Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Wed, 30 Dec 2015 18:21:34 +0100 Subject: [PATCH 406/590] shred ansible-vault tmp_file. Also when editor is interruped. --- lib/ansible/parsing/vault/__init__.py | 35 ++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index d8cf66feca..b7304d156f 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -219,7 +219,27 @@ class VaultEditor: def __init__(self, password): self.vault = VaultLib(password) - + + def _shred_file(self, tmp_path): + """securely destroy a decrypted file.""" + def generate_data(length): + import string, random + chars = string.ascii_lowercase + string.ascii_uppercase + string.digits + return ''.join(random.SystemRandom().choice(chars) for _ in range(length)) + + if not os.path.isfile(tmp_path): + # file is already gone + return + + ld = os.path.getsize(tmp_path) + passes = 3 + with open(tmp_path, "w") as fh: + for _ in range(int(passes)): + data = generate_data(ld) + fh.write(data) + fh.seek(0, 0) + os.remove(tmp_path) + def _edit_file_helper(self, filename, existing_data=None, force_save=False): # Create a tempfile @@ -229,12 +249,18 @@ class VaultEditor: self.write_data(existing_data, tmp_path) # drop the user into an editor on the tmp file - call(self._editor_shell_command(tmp_path)) + try: + call(self._editor_shell_command(tmp_path)) + 
except: + # whatever happens, destroy the decrypted file + self._shred_file(tmp_path) + raise + tmpdata = self.read_data(tmp_path) # Do nothing if the content has not changed if existing_data == tmpdata and not force_save: - os.remove(tmp_path) + self._shred_file(tmp_path) return # encrypt new data and write out to tmp @@ -329,7 +355,7 @@ class VaultEditor: sys.stdout.write(bytes) else: if os.path.isfile(filename): - os.remove(filename) + self._shred_file(filename) with open(filename, "wb") as fh: fh.write(bytes) @@ -338,6 +364,7 @@ class VaultEditor: # overwrite dest with src if os.path.isfile(dest): prev = os.stat(dest) + # old file 'dest' was encrypted, no need to _shred_file os.remove(dest) shutil.move(src, dest) From e39e8ba308364f16e3b74db96b15415ab97b5f52 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 30 Dec 2015 13:49:39 -0500 Subject: [PATCH 407/590] Fix logic mistake in unarchive action plugin --- lib/ansible/plugins/action/unarchive.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py index cd89b936fe..b6c43a3c59 100644 --- a/lib/ansible/plugins/action/unarchive.py +++ b/lib/ansible/plugins/action/unarchive.py @@ -69,14 +69,14 @@ class ActionModule(ActionBase): source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source) remote_checksum = self._remote_checksum(dest, all_vars=task_vars) - if remote_checksum != '3': - result['failed'] = True - result['msg'] = "dest '%s' must be an existing dir" % dest - return result - elif remote_checksum == '4': + if remote_checksum == '4': result['failed'] = True result['msg'] = "python isn't present on the system. 
Unable to compute checksum" return result + elif remote_checksum != '3': + result['failed'] = True + result['msg'] = "dest '%s' must be an existing dir" % dest + return result if copy: # transfer the file to a remote tmp location From 5c34be15b1c800a513a88005c6e6b05f360dfef1 Mon Sep 17 00:00:00 2001 From: Thilo Uttendorfer Date: Thu, 31 Dec 2015 02:31:38 +0100 Subject: [PATCH 408/590] Fix unsupported format character --- lib/ansible/utils/module_docs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 4a90c3caca..14a5d03056 100755 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -67,7 +67,7 @@ def get_docstring(filename, verbose=False): theid = t.id except AttributeError as e: # skip errors can happen when trying to use the normal code - display.warning("Failed to assign id for %t on %s, skipping" % (t, filename)) + display.warning("Failed to assign id for %s on %s, skipping" % (t, filename)) continue if 'DOCUMENTATION' in theid: From c4d2dbfcdbf8743760d658f1bcbec23e912514a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Fri, 1 Jan 2016 15:55:51 +0100 Subject: [PATCH 409/590] Replace to_string by to_unicode. 
Fix https://github.com/ansible/ansible/issues/13707 --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 095118e50e..885005960f 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -112,7 +112,7 @@ class Inventory(object): try: (host, port) = parse_address(h, allow_ranges=False) except AnsibleError as e: - display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_string(e)) + display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e)) host = h port = None all.add_host(Host(host, port)) From 6f2f7a79b34910a75e6eafde5a7872b3e7bcb770 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 1 Jan 2016 21:52:41 -0500 Subject: [PATCH 410/590] add support for diff in file settings this allows modules to report on what specifically changed when using common file functions --- lib/ansible/module_utils/basic.py | 61 ++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 6fd382aa49..1366bfceb4 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -751,7 +751,7 @@ class AnsibleModule(object): context = self.selinux_default_context(path) return self.set_context_if_different(path, context, False) - def set_context_if_different(self, path, context, changed): + def set_context_if_different(self, path, context, changed, diff=None): if not HAVE_SELINUX or not self.selinux_enabled(): return changed @@ -772,6 +772,14 @@ class AnsibleModule(object): new_context[i] = cur_context[i] if cur_context != new_context: + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['secontext'] = cur_context + if 'after' not in diff: + diff['after'] = {} + diff['after']['secontext'] = new_context + try: if 
self.check_mode: return True @@ -785,7 +793,7 @@ class AnsibleModule(object): changed = True return changed - def set_owner_if_different(self, path, owner, changed): + def set_owner_if_different(self, path, owner, changed, diff=None): path = os.path.expanduser(path) if owner is None: return changed @@ -798,6 +806,15 @@ class AnsibleModule(object): except KeyError: self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner) if orig_uid != uid: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['owner'] = orig_uid + if 'after' not in diff: + diff['after'] = {} + diff['after']['owner'] = uid + if self.check_mode: return True try: @@ -807,7 +824,7 @@ class AnsibleModule(object): changed = True return changed - def set_group_if_different(self, path, group, changed): + def set_group_if_different(self, path, group, changed, diff=None): path = os.path.expanduser(path) if group is None: return changed @@ -820,6 +837,15 @@ class AnsibleModule(object): except KeyError: self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group) if orig_gid != gid: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['group'] = orig_gid + if 'after' not in diff: + diff['after'] = {} + diff['after']['group'] = gid + if self.check_mode: return True try: @@ -829,7 +855,7 @@ class AnsibleModule(object): changed = True return changed - def set_mode_if_different(self, path, mode, changed): + def set_mode_if_different(self, path, mode, changed, diff=None): path = os.path.expanduser(path) path_stat = os.lstat(path) @@ -851,6 +877,15 @@ class AnsibleModule(object): prev_mode = stat.S_IMODE(path_stat.st_mode) if prev_mode != mode: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['mode'] = prev_mode + if 'after' not in diff: + diff['after'] = {} + diff['after']['mode'] = mode + if self.check_mode: return True # FIXME: comparison against string 
above will cause this to be executed @@ -984,27 +1019,27 @@ class AnsibleModule(object): or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm] return reduce(or_reduce, perms, 0) - def set_fs_attributes_if_different(self, file_args, changed): + def set_fs_attributes_if_different(self, file_args, changed, diff=None): # set modes owners and context as needed changed = self.set_context_if_different( - file_args['path'], file_args['secontext'], changed + file_args['path'], file_args['secontext'], changed, diff ) changed = self.set_owner_if_different( - file_args['path'], file_args['owner'], changed + file_args['path'], file_args['owner'], changed, diff ) changed = self.set_group_if_different( - file_args['path'], file_args['group'], changed + file_args['path'], file_args['group'], changed, diff ) changed = self.set_mode_if_different( - file_args['path'], file_args['mode'], changed + file_args['path'], file_args['mode'], changed, diff ) return changed - def set_directory_attributes_if_different(self, file_args, changed): - return self.set_fs_attributes_if_different(file_args, changed) + def set_directory_attributes_if_different(self, file_args, changed, diff=None): + return self.set_fs_attributes_if_different(file_args, changed, diff) - def set_file_attributes_if_different(self, file_args, changed): - return self.set_fs_attributes_if_different(file_args, changed) + def set_file_attributes_if_different(self, file_args, changed, diff=None): + return self.set_fs_attributes_if_different(file_args, changed, diff) def add_path_info(self, kwargs): ''' From 210cf06d9ac8e62b15d6f34e9c63c1b98986a1d5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 2 Jan 2016 00:31:09 -0500 Subject: [PATCH 411/590] Tweak how strategies evaluate failed hosts via the iterator and bug fixes * Added additional methods to the iterator code to assess host failures while also taking into account the block rescue/always states * Fixed bugs in the free strategy, where results were 
not always being processed after being collected * Added some prettier printing to the state output from iterator Fixes #13699 --- lib/ansible/executor/play_iterator.py | 46 ++++++++++++++++++++++++-- lib/ansible/plugins/strategy/free.py | 12 ++----- lib/ansible/plugins/strategy/linear.py | 5 +-- 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 534f216c30..147e46e5aa 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -57,14 +57,32 @@ class HostState: self.always_child_state = None def __repr__(self): - return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % ( + def _run_state_to_string(n): + states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"] + try: + return states[n] + except IndexError: + return "UNKNOWN STATE" + + def _failed_state_to_string(n): + states = {1:"FAILED_SETUP", 2:"FAILED_TASKS", 4:"FAILED_RESCUE", 8:"FAILED_ALWAYS"} + if n == 0: + return "FAILED_NONE" + else: + ret = [] + for i in (1, 2, 4, 8): + if n & i: + ret.append(states[i]) + return "|".join(ret) + + return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? 
%s" % ( self.cur_block, self.cur_regular_task, self.cur_rescue_task, self.cur_always_task, self.cur_role, - self.run_state, - self.fail_state, + _run_state_to_string(self.run_state), + _failed_state_to_string(self.fail_state), self.pending_setup, self.tasks_child_state, self.rescue_child_state, @@ -347,6 +365,28 @@ class PlayIterator: def get_failed_hosts(self): return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE) + def _check_failed_state(self, state): + if state is None: + return False + elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state): + return True + elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state): + return True + elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state): + return True + elif state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE: + if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0: + return False + elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0: + return False + else: + return True + return False + + def is_failed(self, host): + s = self.get_host_state(host) + return self._check_failed_state(s) + def get_original_task(self, host, task): ''' Finds the task in the task list which matches the UUID of the given task. 
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index f4fc1226a1..976d33abba 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -78,7 +78,7 @@ class StrategyModule(StrategyBase): (state, task) = iterator.get_next_task_for_host(host, peek=True) display.debug("free host state: %s" % state) display.debug("free host task: %s" % task) - if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task: + if not iterator.is_failed(host) and host_name not in self._tqm._unreachable_hosts and task: # set the flag so the outer loop knows we've still found # some work which needs to be done @@ -135,7 +135,7 @@ class StrategyModule(StrategyBase): if last_host == starting_host: break - results = self._process_pending_results(iterator) + results = self._wait_on_pending_results(iterator) host_results.extend(results) try: @@ -176,13 +176,7 @@ class StrategyModule(StrategyBase): display.debug("done adding collected blocks to iterator") # pause briefly so we don't spin lock - time.sleep(0.05) - - try: - results = self._wait_on_pending_results(iterator) - host_results.extend(results) - except Exception as e: - pass + time.sleep(0.001) # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 7bb227dbae..bfa2c37ce4 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -54,7 +54,8 @@ class StrategyModule(StrategyBase): host_tasks = {} display.debug("building list of next tasks for hosts") for host in hosts: - host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) + if not iterator.is_failed(host): + host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) display.debug("done building task lists") num_setups = 0 @@ 
-98,7 +99,7 @@ class StrategyModule(StrategyBase): rvals = [] display.debug("starting to advance hosts") for host in hosts: - host_state_task = host_tasks[host.name] + host_state_task = host_tasks.get(host.name) if host_state_task is None: continue (s, t) = host_state_task From 7193d27acc7719b25b70eb4709964d0c93796162 Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Mon, 4 Jan 2016 17:19:35 +0100 Subject: [PATCH 412/590] add os.fsync() so that the shredding data (hopefully) hits the drive --- lib/ansible/parsing/vault/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index b7304d156f..1eca0cd571 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -235,9 +235,10 @@ class VaultEditor: passes = 3 with open(tmp_path, "w") as fh: for _ in range(int(passes)): + fh.seek(0, 0) data = generate_data(ld) fh.write(data) - fh.seek(0, 0) + os.fsync(fh) os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): From 8599c566701582024c6eaeeb5cf52d249f48a49e Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 4 Jan 2016 17:46:40 +0100 Subject: [PATCH 413/590] Do not set 'changed' to True when using group_by Since group_by is not changing in any way to the remote system, there is no change. This also make things more consistent with the set_fact plugin. 
--- lib/ansible/plugins/action/group_by.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py index a891d3c70d..99f9db2a88 100644 --- a/lib/ansible/plugins/action/group_by.py +++ b/lib/ansible/plugins/action/group_by.py @@ -40,6 +40,6 @@ class ActionModule(ActionBase): group_name = self._task.args.get('key') group_name = group_name.replace(' ','-') - result['changed'] = True + result['changed'] = False result['add_group'] = group_name return result From 1e911375e850e79295d053f3e3c45c9d9d247159 Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Mon, 4 Jan 2016 18:13:59 +0100 Subject: [PATCH 414/590] add docs, remove unnecessary int() cast --- lib/ansible/parsing/vault/__init__.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 1eca0cd571..28e819860a 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -221,7 +221,22 @@ class VaultEditor: self.vault = VaultLib(password) def _shred_file(self, tmp_path): - """securely destroy a decrypted file.""" + """Securely destroy a decrypted file + + Inspired by unix `shred', try to destroy the secrets "so that they can be + recovered only with great difficulty with specialised hardware, if at all". + + See https://github.com/ansible/ansible/pull/13700 . + + Note that: + - For flash: overwriting would have no effect (due to wear leveling). But the + added disk wear is considered insignificant. + - For other storage systems: the filesystem lies to the vfs (kernel), the disk + driver lies to the filesystem and the disk lies to the driver. But it's better + than nothing. + - most tmp dirs are now tmpfs (ramdisks), for which this is a non-issue. 
+ """ + def generate_data(length): import string, random chars = string.ascii_lowercase + string.ascii_uppercase + string.digits @@ -234,7 +249,7 @@ class VaultEditor: ld = os.path.getsize(tmp_path) passes = 3 with open(tmp_path, "w") as fh: - for _ in range(int(passes)): + for _ in range(passes): fh.seek(0, 0) data = generate_data(ld) fh.write(data) From de529c17340074b1d96937cf4d688da0a7e3bd31 Mon Sep 17 00:00:00 2001 From: "Fuentes, Christopher" Date: Mon, 4 Jan 2016 13:52:06 -0500 Subject: [PATCH 415/590] minor grammar error was making me pull hair out --- docsite/rst/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 90b9a1cb09..e51a1751fe 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -38,7 +38,7 @@ You can also dictate the connection type to be used, if you want:: foo.example.com bar.example.com -You may also wish to keep these in group variables instead, or file in them in a group_vars/ file. +You may also wish to keep these in group variables instead, or file them in a group_vars/ file. See the rest of the documentation for more information about how to organize variables. .. 
_use_ssh: From 151e09d129d63ce485d42d3f6cf0915bb8bd8cee Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Tue, 5 Jan 2016 01:34:45 +0100 Subject: [PATCH 416/590] use unix shred if possible, otherwise fast custom impl; do not shred encrypted file --- lib/ansible/parsing/vault/__init__.py | 90 ++++++++++++++++++--------- 1 file changed, 62 insertions(+), 28 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 28e819860a..bcd038c8b8 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -219,41 +219,67 @@ class VaultEditor: def __init__(self, password): self.vault = VaultLib(password) + + def _shred_file_custom(self, tmp_path): + """"Destroy a file, when shred (core-utils) is not available - def _shred_file(self, tmp_path): - """Securely destroy a decrypted file + Unix `shred' destroys files "so that they can be recovered only with great difficulty with + specialised hardware, if at all". It is based on the method from the paper + "Secure Deletion of Data from Magnetic and Solid-State Memory", + Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996). - Inspired by unix `shred', try to destroy the secrets "so that they can be - recovered only with great difficulty with specialised hardware, if at all". + We do not go to that length to re-implement shred in Python; instead, overwriting with a block + of random data should suffice. See https://github.com/ansible/ansible/pull/13700 . - - Note that: - - For flash: overwriting would have no effect (due to wear leveling). But the - added disk wear is considered insignificant. - - For other storage systems: the filesystem lies to the vfs (kernel), the disk - driver lies to the filesystem and the disk lies to the driver. But it's better - than nothing. - - most tmp dirs are now tmpfs (ramdisks), for which this is a non-issue. 
""" - def generate_data(length): - import string, random - chars = string.ascii_lowercase + string.ascii_uppercase + string.digits - return ''.join(random.SystemRandom().choice(chars) for _ in range(length)) + file_len = os.path.getsize(tmp_path) + + passes = 3 + with open(tmp_path, "wb") as fh: + for _ in range(passes): + fh.seek(0, 0) + # get a random chunk of data + data = os.urandom(min(1024*1024*2, file_len)) + bytes_todo = file_len + while bytes_todo > 0: + chunk = data[:bytes_todo] + fh.write(chunk) + bytes_todo -= len(chunk) + + assert(fh.tell() == file_len) + os.fsync(fh) + + + def _shred_file(self, tmp_path): + """Securely destroy a decrypted file + + Note standard limitations of GNU shred apply (For flash, overwriting would have no effect + due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never + guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks), + it is a non-issue. + + Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is + a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on + a custom shredding method. + """ if not os.path.isfile(tmp_path): # file is already gone return + + try: + r = call(['shred', tmp_path]) + except OSError as e: + # shred is not available on this system, or some other error occured. + self._shred_file_custom(tmp_path) + r = 0 + + if r != 0: + # we could not successfully execute unix shred; therefore, do custom shred. 
+ self._shred_file_custom(tmp_path) - ld = os.path.getsize(tmp_path) - passes = 3 - with open(tmp_path, "w") as fh: - for _ in range(passes): - fh.seek(0, 0) - data = generate_data(ld) - fh.write(data) - os.fsync(fh) os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): @@ -262,7 +288,7 @@ class VaultEditor: _, tmp_path = tempfile.mkstemp() if existing_data: - self.write_data(existing_data, tmp_path) + self.write_data(existing_data, tmp_path, shred=False) # drop the user into an editor on the tmp file try: @@ -300,7 +326,7 @@ class VaultEditor: ciphertext = self.read_data(filename) plaintext = self.vault.decrypt(ciphertext) - self.write_data(plaintext, output_file or filename) + self.write_data(plaintext, output_file or filename, shred=False) def create_file(self, filename): """ create a new encrypted file """ @@ -365,13 +391,21 @@ class VaultEditor: return data - def write_data(self, data, filename): + def write_data(self, data, filename, shred=True): + """write data to given path + + if shred==True, make sure that the original data is first shredded so + that is cannot be recovered + """ bytes = to_bytes(data, errors='strict') if filename == '-': sys.stdout.write(bytes) else: if os.path.isfile(filename): - self._shred_file(filename) + if shred: + self._shred_file(filename) + else: + os.remove(filename) with open(filename, "wb") as fh: fh.write(bytes) From 0d7c3284595c34f53c903995b8dff5fc65303c89 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Mon, 4 Jan 2016 19:52:37 -0500 Subject: [PATCH 417/590] fixed css minification make target for docsite --- docsite/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/Makefile b/docsite/Makefile index 92129f7851..15347f84bf 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -43,4 +43,4 @@ modules: $(FORMATTER) ../hacking/templates/rst.j2 PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates 
--module-dir=../lib/ansible/modules -o rst/ staticmin: - cat _themes/srtd/static/css/theme.css | sed -e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css + cat _themes/srtd/static/css/theme.css | sed -e 's/^[ ]*//g; s/[ ]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css From 692ef6dcc90cf696b4bc25bedb979150adf6e7b9 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Mon, 4 Jan 2016 19:58:51 -0500 Subject: [PATCH 418/590] made docsite ads configurable by marketing --- docsite/_themes/srtd/layout.html | 22 ++++++++++++---------- docsite/_themes/srtd/static/css/theme.css | 21 ++------------------- 2 files changed, 14 insertions(+), 29 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 16f0d8d266..1408be8165 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -166,7 +166,7 @@

- +
@@ -189,15 +189,17 @@
- - -
- -
-
- -
-
+ + {% include "breadcrumbs.html" %}
diff --git a/docsite/_themes/srtd/static/css/theme.css b/docsite/_themes/srtd/static/css/theme.css index 4f7cbc8caa..246e513b79 100644 --- a/docsite/_themes/srtd/static/css/theme.css +++ b/docsite/_themes/srtd/static/css/theme.css @@ -4723,33 +4723,16 @@ span[id*='MathJax-Span'] { padding: 0.4045em 1.618em; } - .DocSiteBanner { - width: 100%; display: flex; display: -webkit-flex; + justify-content: center; + -webkit-justify-content: center; flex-wrap: wrap; -webkit-flex-wrap: wrap; - justify-content: space-between; - -webkit-justify-content: space-between; - background-color: #ff5850; margin-bottom: 25px; } .DocSiteBanner-imgWrapper { max-width: 100%; } - -@media screen and (max-width: 1403px) { - .DocSiteBanner { - width: 100%; - display: flex; - display: -webkit-flex; - flex-wrap: wrap; - -webkit-flex-wrap: wrap; - justify-content: center; - -webkit-justify-content: center; - background-color: #fff; - margin-bottom: 25px; - } -} From 1c3b16c2ddf42c687738687cbc1a708cd05d2112 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Mon, 4 Jan 2016 20:02:01 -0500 Subject: [PATCH 419/590] udpate copyright date --- docsite/_themes/srtd/footer.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html index b70cfde7ad..30b02a8978 100644 --- a/docsite/_themes/srtd/footer.html +++ b/docsite/_themes/srtd/footer.html @@ -13,7 +13,7 @@

- © Copyright 2015 Ansible, Inc.. + © Copyright 2016 Ansible, Inc.. {%- if last_updated %} {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} From 559ba467c09b112ecd7dc8681888b6631fcacba3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 11:11:50 -0800 Subject: [PATCH 420/590] Revert "Convert to bytes later so that make_become_command can jsut operate on text type." This reverts commit c4da5840b5e38aea1740e68f7100256c93dfbb17. Going to do this in the connection plugins --- lib/ansible/plugins/action/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 5383f8afd4..e54898b6db 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -488,6 +488,8 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' + # We may need to revisit this later. + cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd @@ -504,7 +506,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): cmd = self._play_context.make_become_cmd(cmd, executable=executable) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) - rc, stdout, stderr = self._connection.exec_command(to_bytes(cmd, errors='strict'), in_data=in_data, sudoable=sudoable) + rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type From 1ed3a018eb27dd06b08dbad57a162c2865abb635 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 11:12:14 -0800 Subject: [PATCH 421/590] Revert "Fix make tests-py3 on devel. Fix for https://github.com/ansible/ansible/issues/13638." 
This reverts commit e70061334aa99bee466295980f4cd4146096dc29. Going to do this in the connection plugins --- test/units/plugins/action/test_action.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index dcd0437595..0e47b6a538 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -42,14 +42,14 @@ class TestActionBase(unittest.TestCase): play_context.become = True play_context.become_user = play_context.remote_user = 'root' - play_context.make_become_cmd = Mock(return_value=b'CMD') + play_context.make_become_cmd = Mock(return_value='CMD') - action_base._low_level_execute_command(b'ECHO', sudoable=True) + action_base._low_level_execute_command('ECHO', sudoable=True) play_context.make_become_cmd.assert_not_called() play_context.remote_user = 'apo' - action_base._low_level_execute_command(b'ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with(b'ECHO', executable=None) + action_base._low_level_execute_command('ECHO', sudoable=True) + play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None) play_context.make_become_cmd.reset_mock() @@ -57,7 +57,7 @@ class TestActionBase(unittest.TestCase): C.BECOME_ALLOW_SAME_USER = True try: play_context.remote_user = 'root' - action_base._low_level_execute_command(b'ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with(b'ECHO SAME', executable=None) + action_base._low_level_execute_command('ECHO SAME', sudoable=True) + play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None) finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 8d57ffd16bd1025f7b04127fec760c13aca6d6dd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 11:12:41 -0800 Subject: [PATCH 422/590] Revert "Transform the command we pass to subprocess into a byte string in 
_low_level-exec_command" This reverts commit 0c013f592a31c06baac7aadf27d23598f6abe931. Going to do this in the connection plugin --- lib/ansible/plugins/action/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e54898b6db..3f4fff588e 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -488,8 +488,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - # We may need to revisit this later. - cmd = to_bytes(cmd, errors='strict') + if executable is not None: cmd = executable + ' -c ' + cmd From 9e32099b5e0535c2daf656e9d619e9a2efe9d3b6 Mon Sep 17 00:00:00 2001 From: Bruno Almeida do Lago Date: Tue, 5 Jan 2016 16:48:49 +1300 Subject: [PATCH 423/590] Added OpenStack dynamic inventory example Added an example illustrating how to use the OpenStack dynamic inventory script to the "Dynamic Inventory" section. --- docsite/rst/intro_dynamic_inventory.rst | 71 +++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 5f491ebc2e..85feaa143b 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -206,6 +206,77 @@ explicitly clear the cache, you can run the ec2.py script with the ``--refresh-c # ./ec2.py --refresh-cache +.. _openstack_example: + +Example: OpenStack External Inventory Script +```````````````````````````````````````````` + +If you use an OpenStack based cloud, instead of manually maintaining your own inventory file, you can use the openstack.py dynamic inventory to pull information about your compute instances directly from OpenStack. 
+ +You can download the latest version of the OpenStack inventory script at: https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + +You can use the inventory script explicitly (by passing the `-i openstack.py` argument to Ansible) or implicitly (by placing the script at `/etc/ansible/hosts`). + +Explicit use of inventory script +++++++++++++++++++++++++++++++++ + +Download the latest version of the OpenStack dynamic inventory script and make it executable:: + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + chmod +x openstack.py + +Source an OpenStack RC file:: + + source openstack.rc + +.. note:: + + An OpenStack RC file contains the environment variables required by the client tools to establish a connection with the cloud provider, such as the authentication URL, user name, password and region name. For more information on how to download, create or source an OpenStack RC file, please refer to http://docs.openstack.org/cli-reference/content/cli_openrc.html. + +You can confirm the file has been successfully sourced by running a simple command, such as `nova list` and ensuring it return no errors. + +.. note:: + + The OpenStack command line clients are required to run the `nova list` command. For more information on how to install them, please refer to http://docs.openstack.org/cli-reference/content/install_clients.html. + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:: + + ./openstack.py --list + +After a few moments you should see some JSON output with information about your compute instances. 
+ +Once you confirm the dynamic inventory script is working as expected, you can tell Ansible to use the `openstack.py` script as an inventory file, as illustrated below:: + +ansible -i openstack.py all -m ping + +Implicit use of inventory script +++++++++++++++++++++++++++++++++ + +Download the latest version of the OpenStack dynamic inventory script, make it executable and copy it to `/etc/ansible/hosts`:: + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + chmod +x openstack.py + sudo cp openstack.py /etc/ansible/hosts + +Download the sample configuration file, modify it to suit your needs and copy it to /etc/ansible/openstack.yml + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.yml + vi openstack.yml + sudo cp openstack.yml /etc/ansible/ + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:: + + /etc/ansible/hosts --list + +After a few moments you should see some JSON output with information about your compute instances. + +Refresh the cache ++++++++++++++++++ + +Note that the OpenStack dynamic inventory script will cache results to avoid repeated API calls. To explicitly clear the cache, you can run the openstack.py (or hosts) script with the --refresh parameter: + + ./openstack.py --refresh + .. _other_inventory_scripts: Other inventory scripts From c0a8cd950b909983cdc763f80495595d68597089 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 Jan 2016 19:23:12 -0800 Subject: [PATCH 424/590] Fix problems with non-ascii values passed as part of the command to connection plugins @drybjed discovered this with non-ascii environment variables and command line arguments to script and raw module. 
--- lib/ansible/plugins/connection/__init__.py | 1 + lib/ansible/plugins/connection/chroot.py | 2 + lib/ansible/plugins/connection/docker.py | 7 ++- lib/ansible/plugins/connection/jail.py | 6 ++- lib/ansible/plugins/connection/libvirt_lxc.py | 6 ++- lib/ansible/plugins/connection/local.py | 11 ++++- lib/ansible/plugins/connection/ssh.py | 17 +++++-- lib/ansible/plugins/connection/zone.py | 8 ++-- test/integration/unicode-test-script | 7 +++ test/integration/unicode.yml | 45 +++++++++++++++++++ 10 files changed, 97 insertions(+), 13 deletions(-) create mode 100755 test/integration/unicode-test-script diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 06616bac4c..ff00bc0238 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -91,6 +91,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): @property def connected(self): + '''Read-only property holding whether the connection to the remote host is active or closed.''' return self._connected def _become_method_supported(self): diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py index c86ea1fc35..ba41ffb5d8 100644 --- a/lib/ansible/plugins/connection/chroot.py +++ b/lib/ansible/plugins/connection/chroot.py @@ -30,6 +30,7 @@ from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase from ansible.module_utils.basic import is_executable +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -90,6 +91,7 @@ class Connection(ConnectionBase): local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd), host=self.chroot) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/docker.py 
b/lib/ansible/plugins/connection/docker.py index 4e08f56a09..ce556a1431 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -36,6 +36,7 @@ from distutils.version import LooseVersion import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -125,7 +126,8 @@ class Connection(ConnectionBase): # -i is needed to keep stdin open which allows pipelining to work local_cmd = [self.docker_cmd, "exec", '-i', self._play_context.remote_addr, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self._play_context.remote_addr) + display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -159,6 +161,7 @@ class Connection(ConnectionBase): if self.can_copy_bothways: # only docker >= 1.8.1 can do this natively args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ] + args = map(to_bytes, args) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: @@ -170,6 +173,7 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' args = [self.docker_cmd, "exec", "-i", self._play_context.remote_addr, executable, "-c", "dd of={0} bs={1}".format(out_path, BUFSIZE)] + args = map(to_bytes, args) with open(in_path, 'rb') as in_file: try: p = subprocess.Popen(args, stdin=in_file, @@ -192,6 +196,7 @@ class Connection(ConnectionBase): out_dir = os.path.dirname(out_path) args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir] + args = map(to_bytes, args) p = subprocess.Popen(args, 
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py index e665692543..8f88b6ad28 100644 --- a/lib/ansible/plugins/connection/jail.py +++ b/lib/ansible/plugins/connection/jail.py @@ -30,6 +30,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -83,7 +84,7 @@ class Connection(ConnectionBase): return stdout.split() def get_jail_path(self): - p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'], + p = subprocess.Popen([self.jls_cmd, '-j', to_bytes(self.jail), '-q', 'path'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -109,7 +110,8 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.jail) + display.vvv("EXEC %s" % (local_cmd,), host=self.jail) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py index dc82d98404..3bfff8b1c3 100644 --- a/lib/ansible/plugins/connection/libvirt_lxc.py +++ b/lib/ansible/plugins/connection/libvirt_lxc.py @@ -30,6 +30,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -65,7 +66,7 @@ class Connection(ConnectionBase): return cmd def _check_domain(self, domain): - p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', domain], + p = 
subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() if p.returncode: @@ -89,7 +90,8 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.lxc) + display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index e69281d0f3..29b1e9a5ca 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -25,10 +25,13 @@ import select import fcntl import getpass +from ansible.compat.six import text_type, binary_type + import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -69,9 +72,15 @@ class Connection(ConnectionBase): raise AnsibleError("Internal Error: this module does not support optimized module pipelining") executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None - display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd)) + display.vvv(u"{0} EXEC {1}".format(self._play_context.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir of the playbook display.debug("opening command with Popen()") + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = map(to_bytes, cmd) + p = subprocess.Popen( cmd, shell=isinstance(cmd, basestring), diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 
a2abcf20ae..074f6aaa8a 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -33,6 +33,7 @@ from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNo from ansible.plugins.connection import ConnectionBase from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode +from ansible.compat.six import text_type, binary_type try: from __main__ import display @@ -320,7 +321,7 @@ class Connection(ConnectionBase): ''' display_cmd = map(pipes.quote, cmd) - display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) + display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host) # Start the given command. If we don't need to pipeline data, we can try # to use a pseudo-tty (ssh will have been invoked with -tt). If we are @@ -328,6 +329,12 @@ class Connection(ConnectionBase): # old pipes. p = None + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = map(to_bytes, cmd) + if not in_data: try: # Make sure stdin is a proper pty to avoid tcgetattr errors @@ -365,7 +372,7 @@ class Connection(ConnectionBase): # only when using ssh. Otherwise we can send initial data straightaway. state = states.index('ready_to_send') - if 'ssh' in cmd: + if b'ssh' in cmd: if self._play_context.prompt: # We're requesting escalation with a password, so we have to # wait for a password prompt. @@ -538,7 +545,7 @@ class Connection(ConnectionBase): stdin.close() if C.HOST_KEY_CHECKING: - if cmd[0] == "sshpass" and p.returncode == 6: + if cmd[0] == b"sshpass" and p.returncode == 6: raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. 
Please add this host\'s fingerprint to your known_hosts file to manage this host.') controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr @@ -600,7 +607,7 @@ class Connection(ConnectionBase): raise AnsibleConnectionFailure("Failed to connect to the host via ssh.") except (AnsibleConnectionFailure, Exception) as e: if attempt == remaining_tries - 1: - raise e + raise else: pause = 2 ** attempt - 1 if pause > 30: @@ -674,6 +681,8 @@ class Connection(ConnectionBase): # temporarily disabled as we are forced to currently close connections after every task because of winrm # if self._connected and self._persistent: # cmd = self._build_command('ssh', '-O', 'stop', self.host) + # + # cmd = map(to_bytes, cmd) # p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # stdout, stderr = p.communicate() diff --git a/lib/ansible/plugins/connection/zone.py b/lib/ansible/plugins/connection/zone.py index 75d7db545d..b65c80b73f 100644 --- a/lib/ansible/plugins/connection/zone.py +++ b/lib/ansible/plugins/connection/zone.py @@ -31,6 +31,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils import to_bytes try: from __main__ import display @@ -56,8 +57,8 @@ class Connection(ConnectionBase): if os.geteuid() != 0: raise AnsibleError("zone connection requires running as root") - self.zoneadm_cmd = self._search_executable('zoneadm') - self.zlogin_cmd = self._search_executable('zlogin') + self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm')) + self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) if self.zone not in self.list_zones(): raise AnsibleError("incorrect zone name %s" % self.zone) @@ -86,7 +87,7 @@ class Connection(ConnectionBase): def get_zone_path(self): #solaris10vm# zoneadm -z cswbuild list -p 
#-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared - process = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'], + process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -113,6 +114,7 @@ class Connection(ConnectionBase): # this through /bin/sh -c here. Instead it goes through the shell # that zlogin selects. local_cmd = [self.zlogin_cmd, self.zone, cmd] + local_cmd = map(to_bytes, local_cmd) display.vvv("EXEC %s" % (local_cmd), host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, diff --git a/test/integration/unicode-test-script b/test/integration/unicode-test-script new file mode 100755 index 0000000000..340f2a9f5b --- /dev/null +++ b/test/integration/unicode-test-script @@ -0,0 +1,7 @@ +#!/bin/sh + +echo "Non-ascii arguments:" +echo $@ + +echo "Non-ascii Env var:" +echo $option diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 6e8e073a79..f38bf8f5e8 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -49,6 +49,51 @@ that: - "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines" + - name: Run raw with non-ascii options + raw: "/bin/echo Zażółć gęślą jaźń" + register: results + + - name: Check that raw output the right thing + assert: + that: + - "'Zażółć gęślą jaźń' in results.stdout_lines" + + - name: Run a script with non-ascii options and environment + script: unicode-test-script --option "Zażółć gęślą jaźń" + environment: + option: Zażółć + register: results + + - name: Check that script output includes the nonascii arguments and environment values + assert: + that: + - "'--option Zażółć gęślą jaźń' in results.stdout_lines" + - "'Zażółć' in results.stdout_lines" + + - name: Ping with non-ascii environment variable and option + ping: + data: "Zażółć gęślą jaźń" + 
environment: + option: Zażółć + register: results + + - name: Check that ping with non-ascii data was correct + assert: + that: + - "'Zażółć gęślą jaźń' == results.ping" + + - name: Command that echos a non-ascii env var + command: "echo $option" + environment: + option: Zażółć + register: results + + - name: Check that a non-ascii env var was passed to the command module + assert: + that: + - "'Zażółć' in results.stdout_lines" + + - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' gather_facts: true From 6470f7de2cf4cfc37fa5fef66c7e37514b6139d3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 5 Jan 2016 07:53:22 -0800 Subject: [PATCH 425/590] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 002028748f..33014c6db1 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 002028748f080961ade801c30e194bfd4ba043ce +Subproject commit 33014c6db1ce757d0ffa065e6c9924ac4db1cacc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f6a7b6dd1f..82a4cf84be 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f6a7b6dd1f7be93ba640c50bf26adeeabb5af46f +Subproject commit 82a4cf84be82244d0cf7d043c8cbb4f176f086db From 11ce08b9dde32c7e4b51a6fffc22f301c81181be Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Tue, 5 Jan 2016 18:04:38 +0100 Subject: [PATCH 426/590] cleaner implementation and random chunk length. 
--- lib/ansible/parsing/vault/__init__.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index bcd038c8b8..1d4eeef465 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -22,6 +22,7 @@ import shlex import shutil import sys import tempfile +import random from io import BytesIO from subprocess import call from ansible.errors import AnsibleError @@ -235,20 +236,21 @@ class VaultEditor: """ file_len = os.path.getsize(tmp_path) + max_chunk_len = min(1024*1024*2, file_len) passes = 3 with open(tmp_path, "wb") as fh: for _ in range(passes): fh.seek(0, 0) - # get a random chunk of data - data = os.urandom(min(1024*1024*2, file_len)) - bytes_todo = file_len - while bytes_todo > 0: - chunk = data[:bytes_todo] - fh.write(chunk) - bytes_todo -= len(chunk) - - assert(fh.tell() == file_len) + # get a random chunk of data, each pass with other length + chunk_len = random.randint(max_chunk_len/2, max_chunk_len) + data = os.urandom(chunk_len) + + for _ in range(0, file_len // chunk_len): + fh.write(data) + fh.write(data[:file_len % chunk_len]) + + assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy os.fsync(fh) @@ -273,13 +275,12 @@ class VaultEditor: r = call(['shred', tmp_path]) except OSError as e: # shred is not available on this system, or some other error occured. - self._shred_file_custom(tmp_path) - r = 0 + r = 1 if r != 0: # we could not successfully execute unix shred; therefore, do custom shred. 
self._shred_file_custom(tmp_path) - + os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): From 9972c27a9bc1dd2c9051368e082e2b366a04acbe Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 Jan 2016 18:44:09 -0500 Subject: [PATCH 427/590] now handles 'non file diffs' this allows modules to pass back a 'diff' dict and it will still show using the file interface --- lib/ansible/plugins/callback/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index cc2a9ad0e7..faf04b1180 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -116,6 +116,10 @@ class CallbackBase: if 'src_larger' in diff: ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger']) if 'before' in diff and 'after' in diff: + # format complex structures into 'files' + for x in ['before', 'after']: + if isinstance(diff[x], dict): + diff[x] = json.dumps(diff[x], sort_keys=True, indent=4) if 'before_header' in diff: before_header = "before: %s" % diff['before_header'] else: From f3c45adfb8670701d0b19e86787a5213bb5afb5f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 Jan 2016 19:58:06 -0500 Subject: [PATCH 428/590] simplified diff handling in callback no need for the copy or other complexity --- lib/ansible/plugins/callback/default.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index e515945bba..276ac435f4 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -137,11 +137,8 @@ class CallbackModule(CallbackBase): def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: for res in result._result['results']: - newres = self._copy_result(result) - res['item'] = self._get_item(res) - newres._result = 
res - - self.v2_on_file_diff(newres) + if 'diff' in res: + self._display.display(self._get_diff(res['diff'])) elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) From a65543bbafbd328e7848a99d2a570f71c43a53a0 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Tue, 5 Jan 2016 14:52:06 -0600 Subject: [PATCH 429/590] adding password no_log and cleaning up argument spec --- lib/ansible/module_utils/vca.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py index ef89d54556..9737cca8b4 100644 --- a/lib/ansible/module_utils/vca.py +++ b/lib/ansible/module_utils/vca.py @@ -35,8 +35,8 @@ class VcaError(Exception): def vca_argument_spec(): return dict( - username=dict(), - password=dict(), + username=dict(type='str', aliases=['user'], required=True), + password=dict(type='str', aliases=['pass','passwd'], required=True, no_log=True), org=dict(), service_id=dict(), instance_id=dict(), From dc47c25e589f1c2b1f44867076624f0e0564b7c6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 5 Jan 2016 22:01:01 -0500 Subject: [PATCH 430/590] Minor tweak to ensure diff is not empty in callback for file diffs --- lib/ansible/plugins/callback/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 6ca728e65f..dfad657934 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -137,7 +137,7 @@ class CallbackModule(CallbackBase): def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: for res in result._result['results']: - if 'diff' in res: + if 'diff' in res and res['diff']: self._display.display(self._get_diff(res['diff'])) elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) From 
7c8374e0f8e153368bb6a22caf7b7ada07f8d797 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Wed, 6 Jan 2016 20:44:19 +0530 Subject: [PATCH 431/590] Strip string terms before templating The earlier code did call terms.strip(), but ignored the return value instead of passing that in to templar.template(). Clearly an oversight. --- lib/ansible/utils/listify.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py index 7fe83a8fa0..d834737ab5 100644 --- a/lib/ansible/utils/listify.py +++ b/lib/ansible/utils/listify.py @@ -31,9 +31,8 @@ __all__ = ['listify_lookup_plugin_terms'] def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=False, convert_bare=True): if isinstance(terms, string_types): - stripped = terms.strip() # TODO: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override - terms = templar.template(terms, convert_bare=convert_bare, fail_on_undefined=fail_on_undefined) + terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined) else: terms = templar.template(terms, fail_on_undefined=fail_on_undefined) From 11b55be5bbb90b2bc917b2637d6fcdbe1a15092d Mon Sep 17 00:00:00 2001 From: muffl0n Date: Thu, 20 Aug 2015 10:31:48 +0200 Subject: [PATCH 432/590] Show version without supplying a dummy action fixes #12004 parsing x2 does not seem to break anything --- lib/ansible/cli/galaxy.py | 7 +++++-- lib/ansible/cli/vault.py | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 476a7d0f89..a022d17859 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -50,7 +50,7 @@ class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", 
"login", "remove", "search", "setup") - + def __init__(self, args): self.api = None self.galaxy = None @@ -64,6 +64,9 @@ class GalaxyCLI(CLI): epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) + # Workaround for #12004: show version without supplying a dummy action + self.parser.parse_args() + self.set_action() # options specific to actions @@ -141,7 +144,7 @@ class GalaxyCLI(CLI): return True def run(self): - + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 9908f17e57..50a6fdebdc 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -53,6 +53,9 @@ class VaultCLI(CLI): epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) + # Workaround for #12004: show version without supplying a dummy action + self.parser.parse_args() + self.set_action() # options specific to self.actions From ab2f47327a82148441140c9b98a02a6e28877153 Mon Sep 17 00:00:00 2001 From: Sandra Wills Date: Wed, 6 Jan 2016 13:59:25 -0500 Subject: [PATCH 433/590] removed the "wy-side-nav-search" element this is so we can use the new swiftype search and it's search input --- docsite/_themes/srtd/layout.html | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 41b6b75c1d..a10b7656aa 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -150,11 +150,6 @@

- -