
Remove inventory and vault scripts (#2696)

* Remove inventory and vault scripts.

* Remove foreman inventory script tests.
Authored by Felix Fontein on 2021-06-19 15:06:58 +02:00, committed by GitHub
parent 67cabcb2aa · commit 08f7ad06be
78 changed files with 2 additions and 12854 deletions


@@ -0,0 +1,2 @@
removed_features:
- "All inventory and vault scripts contained in community.general were moved to the `contrib-scripts GitHub repository <https://github.com/ansible-community/contrib-scripts>`_ (https://github.com/ansible-collections/community.general/pull/2696)."

abiquo.ini

@@ -1,48 +0,0 @@
# Ansible external inventory script settings for Abiquo
#
# Define an Abiquo user with access to Abiquo API which will be used to
# perform required queries to obtain information to generate the Ansible
# inventory output.
#
[auth]
apiuser = admin
apipass = xabiquo
# Specify Abiquo API version in major.minor format and the access URI to
# API endpoint. Tested versions are: 2.6 , 3.0 and 3.1
# To confirm that your box has access to the Abiquo API you can perform a
# curl command, replacing with suitable values, similar to this:
# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo
#
[api]
version = 3.0
uri = https://192.168.2.100/api
# You probably won't need to modify login preferences, but just in case
login_path = /login
login_type = application/vnd.abiquo.user+json
# To avoid performing excessive calls to the Abiquo API you can define a
# cache for the plugin output. Within the time defined in seconds, the latest
# output will be reused. After that time, the cache will be refreshed.
#
[cache]
cache_max_age = 30
cache_dir = /tmp
[defaults]
# Depending on your Abiquo environment, you may want to use only public IP
# addresses (if using public cloud providers) or also private IP addresses.
# You can set this with public_ip_only configuration.
public_ip_only = false
# default_net_interface is only used if public_ip_only = false
# If public_ip_only is set to false, you can choose the default NIC whose
# IP address is used to define the host.
default_net_interface = nic0
# Only deployed VMs are displayed in the plugin output.
deployed_only = true
# Define if VM metadata is obtained from Abiquo API.
get_metadata = false

abiquo.py

@@ -1,224 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Abiquo
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to Abiquo API
Requires some Python libraries; make sure they are installed before using this script.
This script has been tested with Abiquo 3.0, but it may also work with Abiquo 2.6.
Before using this script you may want to modify the abiquo.ini config file.
This script generates an Ansible hosts file with these host groups:
ABQ_xxx: Defines a host by its Abiquo VM name label
all: Contains all hosts defined in the Abiquo user's enterprise
virtualdatacenter: Creates a host group for each virtualdatacenter containing all hosts defined in it
virtualappliance: Creates a host group for each virtualappliance containing all hosts defined in it
imagetemplate: Creates a host group for each image template containing all hosts using it
'''
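# Example --list output shape (hypothetical values):
# {
#     "all": {"hosts": [], "children": ["vm1"]},
#     "my_vdc": {"hosts": [], "children": ["vm1"]},
#     "vm1": ["192.0.2.10"],
#     "_meta": {"hostvars": {"192.0.2.10": {}}}
# }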
# (c) 2014, Daniel Beneyto <daniel.beneyto@abiquo.com>
#
# This file is part of Ansible,
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import time
import json
from ansible.module_utils.six.moves import configparser as ConfigParser
from ansible.module_utils.urls import open_url
def api_get(link, config):
try:
if link is None:
url = config.get('api', 'uri') + config.get('api', 'login_path')
headers = {"Accept": config.get('api', 'login_type')}
else:
url = link['href'] + '?limit=0'
headers = {"Accept": link['type']}
result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
url_password=config.get('auth', 'apipass').replace('\n', ''))
return json.loads(result.read())
except Exception:
return None
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath, 'inventory']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath, 'inventory']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat('/'.join([dpath, 'inventory']))
except Exception:
# cache doesn't exist or isn't accessible
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
def generate_inv_from_api(enterprise_entity, config):
try:
inventory['all'] = {}
inventory['all']['children'] = []
inventory['all']['hosts'] = []
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = {}
enterprise = api_get(enterprise_entity, config)
vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines')
vms = api_get(vms_entity, config)
for vmcollection in vms['collection']:
for link in vmcollection['links']:
if link['rel'] == 'virtualappliance':
vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
elif link['rel'] == 'virtualdatacenter':
vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
elif link['rel'] == 'virtualmachinetemplate':
vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
        # From abiquo.ini: only add VMs with a public IP to the inventory
if config.getboolean('defaults', 'public_ip_only') is True:
for link in vmcollection['links']:
if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip':
vm_nic = link['title']
break
else:
vm_nic = None
        # Otherwise, assign the IP address of the configured network interface
else:
for link in vmcollection['links']:
if link['rel'] == config.get('defaults', 'default_net_interface'):
vm_nic = link['title']
break
else:
vm_nic = None
vm_state = True
        # From abiquo.ini: only add deployed VMs to the inventory
if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED':
vm_state = False
if vm_nic is not None and vm_state:
if vm_vapp not in inventory:
inventory[vm_vapp] = {}
inventory[vm_vapp]['children'] = []
inventory[vm_vapp]['hosts'] = []
if vm_vdc not in inventory:
inventory[vm_vdc] = {}
inventory[vm_vdc]['hosts'] = []
inventory[vm_vdc]['children'] = []
if vm_template not in inventory:
inventory[vm_template] = {}
inventory[vm_template]['children'] = []
inventory[vm_template]['hosts'] = []
if config.getboolean('defaults', 'get_metadata') is True:
meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata')
try:
metadata = api_get(meta_entity, config)
if (config.getfloat("api", "version") >= 3.0):
vm_metadata = metadata['metadata']
else:
vm_metadata = metadata['metadata']['metadata']
inventory['_meta']['hostvars'][vm_nic] = vm_metadata
except Exception as e:
pass
inventory[vm_vapp]['children'].append(vmcollection['name'])
inventory[vm_vdc]['children'].append(vmcollection['name'])
inventory[vm_template]['children'].append(vmcollection['name'])
inventory['all']['children'].append(vmcollection['name'])
inventory[vmcollection['name']] = []
inventory[vmcollection['name']].append(vm_nic)
return inventory
except Exception as e:
# Return empty hosts output
return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
def get_inventory(enterprise, config):
    ''' Reads the inventory from cache or the Abiquo API '''
    if cache_available(config):
        # get_cache() returns the JSON string written by save_cache(),
        # so do not json.dumps() it a second time
        return get_cache('inventory', config)
    # MAKE ABIQUO API CALLS #
    inv = generate_inv_from_api(enterprise, config)
    save_cache(inv, config)
    return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
enterprise = {}
# Read config
config = ConfigParser.SafeConfigParser()
    for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'abiquo.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
try:
login = api_get(None, config)
enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')
except Exception as e:
enterprise = None
if cache_available(config):
inventory = get_cache('inventory', config)
else:
inventory = get_inventory(enterprise, config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()


@@ -1,336 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Sebastien Goasguen <runseb@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
'''
Apache Libcloud generic external inventory script
==================================================
Generates an inventory that Ansible can understand by making API requests to
cloud providers using the Apache Libcloud library.
This script also assumes there is a libcloud.ini file alongside it.
'''
import sys
import os
import argparse
import re
from time import time
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.six.moves import configparser as ConfigParser
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security as sec
import json
class LibcloudInventory(object):
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = {}
# Index of hostname (address) to instance ID
self.index = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if len(self.inventory) == 0:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
        ''' Determines whether the cache files have expired or are still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the libcloud.ini file '''
config = ConfigParser.SafeConfigParser()
libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
config.read(libcloud_ini_path)
if not config.has_section('driver'):
raise ValueError('libcloud.ini file must contain a [driver] section')
if config.has_option('driver', 'provider'):
self.provider = config.get('driver', 'provider')
else:
raise ValueError('libcloud.ini does not have a provider defined')
if config.has_option('driver', 'key'):
self.key = config.get('driver', 'key')
else:
raise ValueError('libcloud.ini does not have a key defined')
if config.has_option('driver', 'secret'):
self.secret = config.get('driver', 'secret')
else:
raise ValueError('libcloud.ini does not have a secret defined')
        # Default the optional driver settings referenced below, so the
        # Driver() call cannot hit attributes that were never set
        self.host = None
        self.secure = True
        self.path = None
        if config.has_option('driver', 'host'):
            self.host = config.get('driver', 'host')
if config.has_option('driver', 'secure'):
self.secure = config.get('driver', 'secure')
if config.has_option('driver', 'verify_ssl_cert'):
self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert')
if config.has_option('driver', 'port'):
self.port = config.get('driver', 'port')
if config.has_option('driver', 'path'):
self.path = config.get('driver', 'path')
if config.has_option('driver', 'api_version'):
self.api_version = config.get('driver', 'api_version')
Driver = get_driver(getattr(Provider, self.provider))
self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure,
host=self.host, path=self.path)
# Cache related
cache_path = config.get('cache', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
self.cache_path_index = cache_path + "/ansible-libcloud.index"
self.cache_max_age = config.getint('cache', 'cache_max_age')
def parse_cli_args(self):
'''
Command line argument processing
'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
'''
Do API calls to a location, and save data in cache files
'''
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
'''
Gets the list of all nodes
'''
for node in self.conn.list_nodes():
self.add_node(node)
def get_node(self, node_id):
'''
Gets details about a specific node
'''
return [node for node in self.conn.list_nodes() if node.id == node_id][0]
def add_node(self, node):
'''
Adds a node to the inventory and index, as long as it is
addressable
'''
# Only want running instances
if node.state != 0:
return
        # Select the best destination address
        dest = node.public_ips[0] if node.public_ips else None
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = node.name
# Inventory: Group by instance ID (always a group of 1)
self.inventory[node.name] = [dest]
'''
# Inventory: Group by region
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, node.placement, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
'''
# Inventory: Group by key pair
if node.extra['key_name']:
self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
# Inventory: Group by security group, quick thing to handle single sg
if node.extra['security_group']:
self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
# Inventory: Group by tag
if node.extra['tags']:
for tagkey in node.extra['tags'].keys():
self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
def get_host_info(self):
'''
Get variables about a specific host
'''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
            # host might not exist anymore
return self.json_format_dict({}, True)
node_id = self.index[self.args.host]
node = self.get_node(node_id)
instance_vars = {}
for key, value in vars(node).items():
key = self.to_safe('ec2_' + key)
# Handle complex types
if isinstance(value, (int, bool)):
instance_vars[key] = value
elif isinstance(value, string_types):
instance_vars[key] = value.strip()
elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2_tags':
for k, v in iteritems(value):
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
instance_vars["ec2_security_group_names"] = ','.join(group_names)
else:
pass
# TODO Product codes if someone finds them useful
# print(key)
# print(type(value))
# print(value)
return self.json_format_dict(instance_vars, True)
def push(self, my_dict, key, element):
'''
        Pushes an element onto an array that may not have been defined in
        the dict
'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
'''
Reads the inventory from the cache file and returns it as a JSON
object
'''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
'''
        Reads the index from the cache file and sets self.index
'''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
'''
Writes data in JSON format to a file
'''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
'''
Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups
'''
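        # Example: to_safe('tag_Name_web server') -> 'tag_Name_web_server'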
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
'''
Converts a dict to a JSON object and dumps it as a formatted
string
'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def main():
LibcloudInventory()
if __name__ == '__main__':
main()
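The libcloud.ini file that read_settings() expects alongside the script was not part of this commit; a minimal sketch, assuming a hypothetical EC2 account, would look like:

[driver]
provider = EC2
key = AKIAEXAMPLE
secret = changeme
# optional: host, secure, verify_ssl_cert, port, path, api_version

[cache]
cache_path = /tmp
cache_max_age = 300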

apstra_aos.ini

@@ -1,20 +0,0 @@
# Ansible Apstra AOS external inventory script settings
# Dynamic Inventory script parameter can be provided using this file
# Or by using Environment Variables:
# - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
#
# This file takes precedence over the Environment Variables
#
[aos]
# aos_server = 172.20.62.3
# port = 8888
# username = admin
# password = admin
## Blueprint Mode
# To use the inventory in Blueprint mode, you need to define the name of the blueprint you want to use
# blueprint = my-blueprint-l2
# blueprint_interface = true

apstra_aos.py

@@ -1,580 +0,0 @@
#!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file.
- Copy both files (.py and .ini) in your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
Two modes are currently supported: **device based** or **blueprint based**:
- For **device based**, the list of devices is taken from the global device list;
  the serial ID will be used as the inventory_hostname
- For **blueprint based**, the list of devices is taken from the given blueprint;
  the node name will be used as the inventory_hostname
Input parameters can be provided either in the ini file or via environment variables:
The following environment variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the environment variables
Tested with Apstra AOS 1.1
This script was inspired by the cobbler.py inventory script. Thanks!
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import json
import os
import re
import sys
try:
import argparse
HAS_ARGPARSE = True
except ImportError:
HAS_ARGPARSE = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
""" Main execution path """
if not HAS_AOS_PYEZ:
raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
if not HAS_ARGPARSE:
raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')
# Initialize inventory
self.inventory = dict() # A list of groups and the hosts in that group
self.inventory['_meta'] = dict()
self.inventory['_meta']['hostvars'] = dict()
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session(server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
# Save session information in variables of group all
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos')
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
# ----------------------------------------------------
# Build the inventory
# 2 modes are supported: device based or blueprint based
        # - For device based, the list of devices is taken from the global device list;
        #   the serial ID will be used as the inventory_hostname
        # - For blueprint based, the list of devices is taken from the given blueprint;
        #   the node name will be used as the inventory_hostname
# ----------------------------------------------------
if self.aos_blueprint:
bp = aos.Blueprints[self.aos_blueprint]
if bp.exists is False:
fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find(uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
self.add_device_status_to_var(dev_name, device)
# Go over the contents data structure
for node in bp.contents['system']['nodes']:
if node['display_name'] == dev_name:
self.add_host_to_group(node['role'], dev_name)
# Check for additional attribute to import
attributes_to_import = [
'loopback_ip',
'asn',
'role',
'position',
]
for attr in attributes_to_import:
if attr in node.keys():
self.add_var_to_host(dev_name, attr, node[attr])
# if blueprint_interface is enabled in the configuration
# Collect links information
if self.aos_blueprint_int:
interfaces = dict()
for link in bp.contents['system']['links']:
                            # each link has 2 sides [0, 1], and it's unknown which one matches this device;
                            # at first we assume the first side (0) matches and the peer is (1)
peer_id = 1
for side in link['endpoints']:
if side['display_name'] == dev_name:
# import local information first
int_name = side['interface']
# init dict
interfaces[int_name] = dict()
if 'ip' in side.keys():
interfaces[int_name]['ip'] = side['ip']
if 'interface' in side.keys():
interfaces[int_name]['name'] = side['interface']
if 'display_name' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
if 'ip' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
if 'type' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
else:
                                # if the first side didn't match, prepare peer_id
                                # for the second loop iteration
peer_id = 0
self.add_var_to_host(dev_name, 'interfaces', interfaces)
else:
for device in aos.Devices:
                # If not reachable, create by key;
                # if reachable, create by hostname
self.add_host_to_group('all', device.name)
# populate information for this host
self.add_device_status_to_var(device.name, device)
if 'user_config' in device.value.keys():
for key, value in device.value['user_config'].items():
self.add_var_to_host(device.name, key, value)
# Based on device status online|offline, collect facts as well
if device.value['status']['comm_state'] == 'on':
if 'facts' in device.value.keys():
self.add_device_facts_to_var(device.name, device)
                # Check if device is associated with a blueprint;
                # if it is, create a new group
if 'blueprint_active' in device.value['status'].keys():
if 'blueprint_id' in device.value['status'].keys():
bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
if bp:
self.add_host_to_group(bp.name, device.name)
# ----------------------------------------------------
# Convert the inventory and return a JSON String
# ----------------------------------------------------
data_to_print = ""
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def read_settings(self):
""" Reads the settings from the apstra_aos.ini file """
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
# Default Values
self.aos_blueprint = False
self.aos_blueprint_int = True
self.aos_username = 'admin'
self.aos_password = 'admin'
self.aos_server_port = 8888
# Try to reach all parameters from File, if not available try from ENV
try:
self.aos_server = config.get('aos', 'aos_server')
except Exception:
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
try:
self.aos_server_port = config.get('aos', 'port')
except Exception:
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
try:
self.aos_username = config.get('aos', 'username')
except Exception:
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
try:
self.aos_password = config.get('aos', 'password')
except Exception:
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except Exception:
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
self.aos_blueprint_int = False
except Exception:
pass
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def add_host_to_group(self, group, host):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['hosts'].append(host)
def add_var_to_host(self, host, var, value):
# Check if the host exist, if not initialize it
if host not in self.inventory['_meta']['hostvars'].keys():
self.inventory['_meta']['hostvars'][host] = {}
self.inventory['_meta']['hostvars'][host][var] = value
def add_var_to_group(self, group, var, value):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['vars'][var] = value
def add_device_facts_to_var(self, device_name, device):
# Populate variables for this host
self.add_var_to_host(device_name,
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name, 'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
self.add_var_to_host(device_name, key, value)
if key == 'os_family':
self.add_host_to_group(value, device_name)
elif key == 'hw_model':
self.add_host_to_group(value, device_name)
def cleanup_group_name(self, group_name):
"""
        Clean up group name by:
        - Replacing each run of non-alphanumeric characters with an underscore
        - Converting to lowercase
"""
rx = re.compile(r'\W+')
clean_group = rx.sub('_', group_name).lower()
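        # e.g. 'Generic Model' -> 'generic_model', 'Ubuntu GNU/Linux' -> 'ubuntu_gnu_linux'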
return clean_group
    def add_device_status_to_var(self, device_name, device):
        if 'status' in device.value.keys():
            for key, value in device.value['status'].items():
                self.add_var_to_host(device_name, key, value)
# Run the script
if __name__ == '__main__':
AosInventory()

azure_rm.ini

@@ -1,23 +0,0 @@
#
# Configuration file for azure_rm.py
#
[azure]
# Control which resource groups are included. By default all resource groups are included.
# Set resource_groups to a comma separated list of resource group names.
#resource_groups=
# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
#tags=
# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
#locations=
# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
include_powerstate=yes
# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
group_by_resource_group=yes
group_by_location=yes
group_by_security_group=yes
group_by_os_family=yes
group_by_tag=yes

azure_rm.py

@@ -1,962 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Important note (2018/10)
========================
This inventory script is in maintenance mode: only critical bug fixes but no new features.
There's a new Azure external inventory script at https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/azure_rm.py,
with better performance and the latest features. Please go there for the latest Azure inventory support.
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see https://azure-sdk-for-python.readthedocs.io/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
Command line arguments and environment variables can specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
- cloud_environment
- adfs_authority_url
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
- AZURE_CLOUD_ENVIRONMENT
- AZURE_ADFS_AUTHORITY_URL
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-00-5E-00-53-FE",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
export AZURE_INI_PATH=/path/to/custom.ini
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
import argparse
import json
import os
import re
import sys
import inspect
from os.path import expanduser
from ansible.module_utils.six.moves import configparser as cp
import ansible.module_utils.six.moves.urllib.parse as urlparse
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
CLIError = None
try:
from msrestazure.azure_active_directory import AADTokenCredentials
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure import azure_cloud
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.subscriptions import SubscriptionClient
from azure.mgmt.compute import ComputeManagementClient
from adal.authentication_context import AuthenticationContext
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
CLIError = Exception
try:
from ansible.release import __version__ as ansible_version
except ImportError:
ansible_version = 'unknown'
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
)
AZURE_CONFIG_SETTINGS = dict(
resource_groups='AZURE_RESOURCE_GROUPS',
tags='AZURE_TAGS',
locations='AZURE_LOCATIONS',
include_powerstate='AZURE_INCLUDE_POWERSTATE',
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
group_by_location='AZURE_GROUP_BY_LOCATION',
group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
group_by_tag='AZURE_GROUP_BY_TAG',
group_by_os_family='AZURE_GROUP_BY_OS_FAMILY',
use_private_ip='AZURE_USE_PRIVATE_IP'
)
AZURE_MIN_VERSION = "2.0.0"
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
def azure_id_to_dict(id):
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
index = 0
while index < len(pieces) - 1:
result[pieces[index]] = pieces[index + 1]
index += 1
return result
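# Example: azure_id_to_dict('/subscriptions/xxx/resourceGroups/rg1/providers/'
#                           'Microsoft.Compute/virtualMachines/vm1') maps each
# path segment to its successor, so result['resourceGroups'] == 'rg1' and
# result['virtualMachines'] == 'vm1' (value->key pairs appear as well).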
class AzureRM(object):
def __init__(self, args):
self._args = args
self._cloud_environment = None
self._compute_client = None
self._resource_client = None
self._network_client = None
self._adfs_authority_url = None
self._resource = None
self.debug = False
if args.debug:
self.debug = True
self.credentials = self._get_credentials(args)
if not self.credentials:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")
# if cloud_environment specified, look up/build Cloud object
raw_cloud_env = self.credentials.get('cloud_environment')
if not raw_cloud_env:
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
else:
# try to look up "well-known" values via the name attribute on azure_cloud members
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
if len(matched_clouds) == 1:
self._cloud_environment = matched_clouds[0]
elif len(matched_clouds) > 1:
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
else:
if not urlparse.urlparse(raw_cloud_env).scheme:
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
try:
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
except Exception as e:
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))
if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
# get authentication authority
# for adfs, user could pass in authority or not.
# for others, use default authority from cloud environment
if self.credentials.get('adfs_authority_url'):
self._adfs_authority_url = self.credentials.get('adfs_authority_url')
else:
self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
# get resource from cloud environment
self._resource = self._cloud_environment.endpoints.active_directory_resource_id
if self.credentials.get('credentials'):
self.azure_credentials = self.credentials.get('credentials')
elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment)
elif self.credentials.get('ad_user') is not None and \
self.credentials.get('password') is not None and \
self.credentials.get('client_id') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = self.acquire_token_with_username_password(
self._adfs_authority_url,
self._resource,
self.credentials['ad_user'],
self.credentials['password'],
self.credentials['client_id'],
self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
tenant = self.credentials.get('tenant')
if not tenant:
tenant = 'common'
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment)
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password, or "
"ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
"be logged in using AzureCLI.")
def log(self, msg):
if self.debug:
print(msg + u'\n')
def fail(self, msg):
raise Exception(msg)
def _get_profile(self, profile="default"):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = cp.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except Exception:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
return env_credentials
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_msi_credentials(self, subscription_id_param=None):
credentials = MSIAuthentication()
subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
try:
# try to get the subscription in MSI to test whether MSI is enabled
subscription_client = SubscriptionClient(credentials)
subscription = next(subscription_client.subscriptions.list())
subscription_id = str(subscription.subscription_id)
return {
'credentials': credentials,
'subscription_id': subscription_id_param or subscription_id
}
except Exception as exc:
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
if arg_credentials['ad_user'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
        # try default profile from ~/.azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
if msi_credentials:
self.log('Retrieved credentials from MSI.')
return msi_credentials
try:
if HAS_AZURE_CLI_CORE:
self.log('Retrieving credentials from AzureCLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as ce:
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
return None
def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
authority_uri = authority
if tenant is not None:
authority_uri = authority + '/' + tenant
context = AuthenticationContext(authority_uri)
token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
return AADTokenCredentials(token_response)
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("You might need to register {0} using an admin account".format(key))
self.log(("To register a provider using the Python CLI: "
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
def get_mgmt_svc_client(self, client_type, base_url, api_version):
client = client_type(self.azure_credentials,
self.subscription_id,
base_url=base_url,
api_version=api_version)
client.config.add_user_agent(ANSIBLE_USER_AGENT)
return client
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-06-01')
self._register('Microsoft.Network')
return self._network_client
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-05-10')
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-03-30')
self._register('Microsoft.Compute')
return self._compute_client
class AzureInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
try:
rm = AzureRM(self._args)
except Exception as e:
sys.exit("{0}".format(str(e)))
self._compute_client = rm.compute_client
self._network_client = rm.network_client
self._resource_client = rm.rm_client
self._security_groups = None
self.resource_groups = []
self.tags = None
self.locations = None
self.replace_dash_in_groups = False
self.group_by_resource_group = True
self.group_by_location = True
self.group_by_os_family = True
self.group_by_security_group = True
self.group_by_tag = True
self.include_powerstate = True
self.use_private_ip = False
self._inventory = dict(
_meta=dict(
hostvars=dict()
),
azure=[]
)
self._get_settings()
if self._args.resource_groups:
self.resource_groups = self._args.resource_groups.split(',')
if self._args.tags:
self.tags = self._args.tags.split(',')
if self._args.locations:
self.locations = self._args.locations.split(',')
if self._args.no_powerstate:
self.include_powerstate = False
self.get_inventory()
print(self._json_format_dict(pretty=self._args.pretty))
sys.exit(0)
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty print JSON output (default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id ')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad_user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--adfs_authority_url', action='store',
help='Azure ADFS authority url')
parser.add_argument('--cloud_environment', action='store',
help='Azure Cloud Environment name or metadata discovery URL')
parser.add_argument('--resource-groups', action='store',
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
help='Return inventory for comma separated list of tag key:value pairs')
parser.add_argument('--locations', action='store',
help='Return inventory for comma separated list of locations')
parser.add_argument('--no-powerstate', action='store_true', default=False,
help='Do not include the power state of each virtual host')
return parser.parse_args()
def get_inventory(self):
if len(self.resource_groups) > 0:
# get VMs for requested resource groups
for resource_group in self.resource_groups:
try:
virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower())
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc)))
if self._args.host or self.tags:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
else:
# get all VMs within the subscription
try:
virtual_machines = self._compute_client.virtual_machines.list_all()
except Exception as exc:
sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
if self._args.host or self.tags or self.locations:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
def _load_machines(self, machines):
for machine in machines:
id_dict = azure_id_to_dict(machine.id)
# TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# fixed, we should remove the .lower(). Opened Issue
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
resource_group = id_dict['resourceGroups'].lower()
if self.group_by_security_group:
self._get_security_groups(resource_group)
host_vars = dict(
ansible_host=None,
private_ip=None,
private_ip_alloc_method=None,
public_ip=None,
public_ip_name=None,
public_ip_id=None,
public_ip_alloc_method=None,
fqdn=None,
location=machine.location,
name=machine.name,
type=machine.type,
id=machine.id,
tags=machine.tags,
network_interface_id=None,
network_interface=None,
resource_group=resource_group,
mac_address=None,
plan=(machine.plan.name if machine.plan else None),
virtual_machine_size=machine.hardware_profile.vm_size,
computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
provisioning_state=machine.provisioning_state,
)
host_vars['os_disk'] = dict(
name=machine.storage_profile.os_disk.name,
operating_system_type=machine.storage_profile.os_disk.os_type.value.lower()
)
if self.include_powerstate:
host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
if machine.storage_profile.image_reference:
host_vars['image'] = dict(
offer=machine.storage_profile.image_reference.offer,
publisher=machine.storage_profile.image_reference.publisher,
sku=machine.storage_profile.image_reference.sku,
version=machine.storage_profile.image_reference.version
)
# Add windows details
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
host_vars['ansible_connection'] = 'winrm'
host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
host_vars['windows_rm'] = None
if machine.os_profile.windows_configuration.win_rm is not None:
host_vars['windows_rm'] = dict(listeners=None)
if machine.os_profile.windows_configuration.win_rm.listeners is not None:
host_vars['windows_rm']['listeners'] = []
for listener in machine.os_profile.windows_configuration.win_rm.listeners:
host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name,
certificate_url=listener.certificate_url))
for interface in machine.network_profile.network_interfaces:
interface_reference = self._parse_ref_id(interface.id)
network_interface = self._network_client.network_interfaces.get(
interface_reference['resourceGroups'],
interface_reference['networkInterfaces'])
if network_interface.primary:
if self.group_by_security_group and \
self._security_groups[resource_group].get(network_interface.id, None):
host_vars['security_group'] = \
self._security_groups[resource_group][network_interface.id]['name']
host_vars['security_group_id'] = \
self._security_groups[resource_group][network_interface.id]['id']
host_vars['network_interface'] = network_interface.name
host_vars['network_interface_id'] = network_interface.id
host_vars['mac_address'] = network_interface.mac_address
for ip_config in network_interface.ip_configurations:
host_vars['private_ip'] = ip_config.private_ip_address
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
if self.use_private_ip:
host_vars['ansible_host'] = ip_config.private_ip_address
if ip_config.public_ip_address:
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
public_ip_address = self._network_client.public_ip_addresses.get(
public_ip_reference['resourceGroups'],
public_ip_reference['publicIPAddresses'])
if not self.use_private_ip:
host_vars['ansible_host'] = public_ip_address.ip_address
host_vars['public_ip'] = public_ip_address.ip_address
host_vars['public_ip_name'] = public_ip_address.name
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
host_vars['public_ip_id'] = public_ip_address.id
if public_ip_address.dns_settings:
host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
self._add_host(host_vars)
def _selected_machines(self, virtual_machines):
selected_machines = []
for machine in virtual_machines:
if self._args.host and self._args.host == machine.name:
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
def _get_security_groups(self, resource_group):
''' For a given resource_group build a mapping of network_interface.id to security_group name '''
if not self._security_groups:
self._security_groups = dict()
if not self._security_groups.get(resource_group):
self._security_groups[resource_group] = dict()
for group in self._network_client.network_security_groups.list(resource_group):
if group.network_interfaces:
for interface in group.network_interfaces:
self._security_groups[resource_group][interface.id] = dict(
name=group.name,
id=group.id
)
def _get_powerstate(self, resource_group, name):
try:
vm = self._compute_client.virtual_machines.get(resource_group,
name,
expand='instanceview')
except Exception as exc:
sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
return next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
def _add_host(self, vars):
host_name = self._to_safe(vars['name'])
resource_group = self._to_safe(vars['resource_group'])
operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower())
security_group = None
if vars.get('security_group'):
security_group = self._to_safe(vars['security_group'])
if self.group_by_os_family:
if not self._inventory.get(operating_system_type):
self._inventory[operating_system_type] = []
self._inventory[operating_system_type].append(host_name)
if self.group_by_resource_group:
if not self._inventory.get(resource_group):
self._inventory[resource_group] = []
self._inventory[resource_group].append(host_name)
if self.group_by_location:
if not self._inventory.get(vars['location']):
self._inventory[vars['location']] = []
self._inventory[vars['location']].append(host_name)
if self.group_by_security_group and security_group:
if not self._inventory.get(security_group):
self._inventory[security_group] = []
self._inventory[security_group].append(host_name)
self._inventory['_meta']['hostvars'][host_name] = vars
self._inventory['azure'].append(host_name)
if self.group_by_tag and vars.get('tags'):
for key, value in vars['tags'].items():
safe_key = self._to_safe(key)
safe_value = safe_key + '_' + self._to_safe(value)
if not self._inventory.get(safe_key):
self._inventory[safe_key] = []
if not self._inventory.get(safe_value):
self._inventory[safe_value] = []
self._inventory[safe_key].append(host_name)
self._inventory[safe_value].append(host_name)
def _json_format_dict(self, pretty=False):
# convert inventory to json
if pretty:
return json.dumps(self._inventory, sort_keys=True, indent=2)
else:
return json.dumps(self._inventory)
def _get_settings(self):
# Load settings from the .ini, if it exists. Otherwise,
# look for environment values.
file_settings = self._load_settings()
if file_settings:
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
values = file_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif file_settings.get(key):
val = self._to_boolean(file_settings[key])
setattr(self, key, val)
else:
env_settings = self._get_env_settings()
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
values = env_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif env_settings.get(key, None) is not None:
val = self._to_boolean(env_settings[key])
setattr(self, key, val)
def _parse_ref_id(self, reference):
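        # Splits an Azure resource id into key/value pairs. Illustrative
        # (hypothetical id): '/subscriptions/XXX/resourceGroups/rg1/providers/
        # Microsoft.Network/networkInterfaces/nic0' parses to
        # {'subscriptions': 'XXX', 'resourceGroups': 'rg1',
        #  'providers': 'Microsoft.Network', 'networkInterfaces': 'nic0'}.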
response = {}
keys = reference.strip('/').split('/')
for index in range(len(keys)):
if index < len(keys) - 1 and index % 2 == 0:
response[keys[index]] = keys[index + 1]
return response
def _to_boolean(self, value):
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
result = True
elif value in ['No', 'no', 0, 'False', 'false', False]:
result = False
else:
result = True
return result
def _get_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
config = None
settings = None
try:
config = cp.ConfigParser()
config.read(path)
except Exception:
pass
if config is not None:
settings = dict()
for key in AZURE_CONFIG_SETTINGS:
try:
settings[key] = config.get('azure', key, raw=True)
except Exception:
pass
return settings
def _tags_match(self, tag_obj, tag_args):
'''
Return True if the tags object from a VM contains the requested tag values.
:param tag_obj: Dictionary of string:string pairs
        :param tag_args: List of strings in the form key:value
:return: boolean
'''
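        # Illustrative: tag_obj={'env': 'prod', 'owner': 'ops'} matches
        # tag_args=['env:prod', 'owner'] - a key:value pair must match exactly,
        # while a bare key only needs to be present.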
if not tag_obj:
return False
matches = 0
for arg in tag_args:
arg_key = arg
arg_value = None
if re.search(r':', arg):
arg_key, arg_value = arg.split(':')
if arg_value and tag_obj.get(arg_key, None) == arg_value:
matches += 1
elif not arg_value and tag_obj.get(arg_key, None) is not None:
matches += 1
if matches == len(tag_args):
return True
return False
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += r"\-"
return re.sub(regex + "]", "_", word)
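    # Illustrative: _to_safe('my-group.name') returns 'my_group_name' when
    # replace_dash_in_groups is true, and 'my-group_name' otherwise.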
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC))
AzureInventory()
if __name__ == '__main__':
main()

View file

@ -1,39 +0,0 @@
# Copyright 2016 Doalitic.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# The Brook.io inventory script has the following dependencies:
# 1. A working Brook.io account
# See https://brook.io
# 2. A valid token generated through the 'API token' panel of Brook.io
# 3. The libbrook Python library.
# See https://github.com/doalitic/libbrook
#
# Author: Francisco Ros <fjros@doalitic.com>
[brook]
# Valid API token (required).
# E.g. 'Aed342a12A60433697281FeEe1a4037C'
#
api_token =
# Project id within Brook.io, as obtained from the project settings (optional). If provided, the
# generated inventory will only include the hosts that belong to that project. Otherwise, it will
# include all hosts in projects the requesting user has access to. The response includes groups
# 'project_x', where 'x' is the project name.
# E.g. '2e8e099e1bc34cc0979d97ac34e9577b'
#
project_id =

View file

@ -1,248 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Doalitic.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Brook.io external inventory script
==================================
Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook
library, so that dependency must be installed on the system to run this script.
The default configuration file is named 'brook.ini' and is located alongside this script. You can
choose any other file by setting the BROOK_INI_PATH environment variable.
If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in
projects to which the requesting user belongs. Otherwise, only instances from the given project are
included, provided the requesting user belongs to it.
The following variables are established for every host. They can be retrieved from the hostvars
dictionary.
- brook_pid: str
- brook_name: str
- brook_description: str
- brook_project: str
- brook_template: str
- brook_region: str
- brook_zone: str
- brook_status: str
- brook_tags: list(str)
- brook_internal_ips: list(str)
- brook_external_ips: list(str)
- brook_created_at
- brook_updated_at
- ansible_ssh_host
Instances are grouped by the following categories:
- tag:
A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist
instances with tags 'foo' and/or 'bar'.
- project:
A group is created for each project. E.g. group 'project_test' is created if a project named
'test' exists.
- status:
A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING'
are created if there are instances in running and pending state.
Examples:
Execute uname on all instances in project 'test'
$ ansible -i brook.py project_test -m shell -a "/bin/uname -a"
Install nginx on all debian web servers tagged with 'www'
$ ansible -i brook.py tag_www -m apt -a "name=nginx state=present"
Run site.yml playbook on web servers
$ ansible-playbook -i brook.py site.yml -l tag_www
Support:
This script is tested on Python 2.7 and 3.4. It may work on other versions though.
Author: Francisco Ros <fjros@doalitic.com>
Version: 0.2
"""
import sys
import os
from ansible.module_utils.six.moves.configparser import SafeConfigParser as ConfigParser
import json
try:
import libbrook
except Exception:
sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook')
class BrookInventory:
_API_ENDPOINT = 'https://api.brook.io'
def __init__(self):
self._configure_from_file()
self.client = self.get_api_client()
self.inventory = self.get_inventory()
def _configure_from_file(self):
"""Initialize from .ini file.
        Configuration file is assumed to be named 'brook.ini' and to be located in the same
        directory as this file, unless the environment variable BROOK_INI_PATH says otherwise.
"""
brook_ini_default_path = \
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini')
brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path)
config = ConfigParser(defaults={
'api_token': '',
'project_id': ''
})
config.read(brook_ini_path)
self.api_token = config.get('brook', 'api_token')
self.project_id = config.get('brook', 'project_id')
if not self.api_token:
sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic '
'inventory.')
def get_api_client(self):
"""Authenticate user via the provided credentials and return the corresponding API client.
"""
# Get JWT token from API token
#
unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT)
auth_api = libbrook.AuthApi(unauthenticated_client)
api_token = libbrook.AuthTokenRequest()
api_token.token = self.api_token
jwt = auth_api.auth_token(token=api_token)
# Create authenticated API client
#
return libbrook.ApiClient(host=self._API_ENDPOINT,
header_name='Authorization',
header_value='Bearer %s' % jwt.token)
def get_inventory(self):
"""Generate Ansible inventory.
"""
groups = dict()
meta = dict()
meta['hostvars'] = dict()
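        # Resulting inventory shape (illustrative, with a hypothetical host):
        #   {'project_test': ['host1'], 'status_RUNNING': ['host1'],
        #    'tag_www': ['host1'], '_meta': {'hostvars': {'host1': {...}}}}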
instances_api = libbrook.InstancesApi(self.client)
projects_api = libbrook.ProjectsApi(self.client)
templates_api = libbrook.TemplatesApi(self.client)
# If no project is given, get all projects the requesting user has access to
#
if not self.project_id:
projects = [project.id for project in projects_api.index_projects()]
else:
projects = [self.project_id]
# Build inventory from instances in all projects
#
for project_id in projects:
project = projects_api.show_project(project_id=project_id)
for instance in instances_api.index_instances(project_id=project_id):
# Get template used for this instance if known
template = templates_api.show_template(template_id=instance.template) if instance.template else None
# Update hostvars
try:
meta['hostvars'][instance.name] = \
self.hostvars(project, instance, template, instances_api)
except libbrook.rest.ApiException:
continue
# Group by project
project_group = 'project_%s' % project.name
if project_group in groups:
groups[project_group].append(instance.name)
else:
groups[project_group] = [instance.name]
# Group by status
status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status']
if status_group in groups:
groups[status_group].append(instance.name)
else:
groups[status_group] = [instance.name]
# Group by tags
tags = meta['hostvars'][instance.name]['brook_tags']
for tag in tags:
tag_group = 'tag_%s' % tag
if tag_group in groups:
groups[tag_group].append(instance.name)
else:
groups[tag_group] = [instance.name]
groups['_meta'] = meta
return groups
def hostvars(self, project, instance, template, api):
"""Return the hostvars dictionary for the given instance.
Raise libbrook.rest.ApiException if it cannot retrieve all required information from the
Brook.io API.
"""
hostvars = instance.to_dict()
hostvars['brook_pid'] = hostvars.pop('pid')
hostvars['brook_name'] = hostvars.pop('name')
hostvars['brook_description'] = hostvars.pop('description')
hostvars['brook_project'] = hostvars.pop('project')
hostvars['brook_template'] = hostvars.pop('template')
hostvars['brook_region'] = hostvars.pop('region')
hostvars['brook_zone'] = hostvars.pop('zone')
hostvars['brook_created_at'] = hostvars.pop('created_at')
hostvars['brook_updated_at'] = hostvars.pop('updated_at')
del hostvars['id']
del hostvars['key']
del hostvars['provider']
del hostvars['image']
# Substitute identifiers for names
#
hostvars['brook_project'] = project.name
hostvars['brook_template'] = template.name if template else None
# Retrieve instance state
#
status = api.status_instance(project_id=project.id, instance_id=instance.id)
hostvars.update({'brook_status': status.state})
# Retrieve instance tags
#
tags = api.instance_tags(project_id=project.id, instance_id=instance.id)
hostvars.update({'brook_tags': tags})
# Retrieve instance addresses
#
addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id)
internal_ips = [address.address for address in addresses if address.scope == 'internal']
external_ips = [address.address for address in addresses
if address.address and address.scope == 'external']
hostvars.update({'brook_internal_ips': internal_ips})
hostvars.update({'brook_external_ips': external_ips})
try:
hostvars.update({'ansible_ssh_host': external_ips[0]})
except IndexError:
raise libbrook.rest.ApiException(status='502', reason='Instance without public IP')
return hostvars
# Run the script
#
brook = BrookInventory()
print(json.dumps(brook.inventory))

View file

@ -1,40 +0,0 @@
[cloudforms]
# The version of CloudForms; currently not used by the script, but tested with this version
version = 4.1
# This should be the hostname of the CloudForms server
url = https://cfme.example.com
# This will more than likely need to be a local CloudForms username
username = <set your username here>
# The password for said username
password = <set your password here>
# True = verify SSL certificate / False = trust anything
ssl_verify = True
# limit the number of vms returned per request
limit = 100
# purge the CloudForms actions from hosts
purge_actions = True
# Clean up group names (from tags and other groupings) so Ansible doesn't complain
clean_group_keys = True
# Explode tags into nested groups / subgroups
nest_tags = False
# If set, ensure host name are suffixed with this value
# Note: This suffix *must* include the leading '.' as it is appended to the hostname as is
# suffix = .example.org
# If true, will try and use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list
prefer_ipv4 = False
[cache]
# Maximum time to trust the cache in seconds
max_age = 600

View file

@ -1,499 +0,0 @@
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
from ansible.module_utils.six.moves import configparser as ConfigParser
import os
import re
from time import time
import requests
from requests.auth import HTTPBasicAuth
import warnings
from ansible.errors import AnsibleError
import json
class CloudFormsInventory(object):
def __init__(self):
"""
Main execution path
"""
self.inventory = dict() # A list of groups and the hosts in that group
self.hosts = dict() # Details about hosts in the inventory
# Parse CLI arguments
self.parse_cli_args()
# Read settings
self.read_settings()
# Cache
if self.args.refresh_cache or not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_hosts_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
if self.args.debug:
print("Fetching host [%s]" % self.args.host)
data_to_print += self.get_host_info(self.args.host)
else:
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.hosts:
self.inventory['_meta']['hostvars'][hostname] = {
'cloudforms': self.hosts[hostname],
}
# include the ansible_ssh_host in the top level
if 'ansible_ssh_host' in self.hosts[hostname]:
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host']
data_to_print += self.json_format_dict(self.inventory, self.args.pretty)
print(data_to_print)
def is_cache_valid(self):
"""
Determines if the cache files have expired, or if it is still valid
"""
if self.args.debug:
print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age))
if os.path.isfile(self.cache_path_hosts):
mod_time = os.path.getmtime(self.cache_path_hosts)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
if self.args.debug:
print("Cache is still valid!")
return True
if self.args.debug:
print("Cache is stale or does not exist.")
return False
def read_settings(self):
"""
Reads the settings from the cloudforms.ini file
"""
config = ConfigParser.SafeConfigParser()
config_paths = [
os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini',
"/etc/ansible/cloudforms.ini",
]
env_value = os.environ.get('CLOUDFORMS_INI_PATH')
if env_value is not None:
config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
if self.args.debug:
for config_path in config_paths:
print("Reading from configuration file [%s]" % config_path)
config.read(config_paths)
# CloudForms API related
if config.has_option('cloudforms', 'url'):
self.cloudforms_url = config.get('cloudforms', 'url')
else:
self.cloudforms_url = None
if not self.cloudforms_url:
warnings.warn("No url specified, expected something like 'https://cfme.example.com'")
if config.has_option('cloudforms', 'username'):
self.cloudforms_username = config.get('cloudforms', 'username')
else:
self.cloudforms_username = None
if not self.cloudforms_username:
warnings.warn("No username specified, you need to specify a CloudForms username.")
if config.has_option('cloudforms', 'password'):
self.cloudforms_pw = config.get('cloudforms', 'password', raw=True)
else:
self.cloudforms_pw = None
if not self.cloudforms_pw:
warnings.warn("No password specified, you need to specify a password for the CloudForms user.")
if config.has_option('cloudforms', 'ssl_verify'):
self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify')
else:
self.cloudforms_ssl_verify = True
if config.has_option('cloudforms', 'version'):
self.cloudforms_version = config.get('cloudforms', 'version')
else:
self.cloudforms_version = None
if config.has_option('cloudforms', 'limit'):
self.cloudforms_limit = config.getint('cloudforms', 'limit')
else:
self.cloudforms_limit = 100
if config.has_option('cloudforms', 'purge_actions'):
self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions')
else:
self.cloudforms_purge_actions = True
if config.has_option('cloudforms', 'clean_group_keys'):
self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys')
else:
self.cloudforms_clean_group_keys = True
if config.has_option('cloudforms', 'nest_tags'):
self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags')
else:
self.cloudforms_nest_tags = False
if config.has_option('cloudforms', 'suffix'):
self.cloudforms_suffix = config.get('cloudforms', 'suffix')
if self.cloudforms_suffix[0] != '.':
raise AnsibleError('Leading fullstop is required for Cloudforms suffix')
else:
self.cloudforms_suffix = None
if config.has_option('cloudforms', 'prefer_ipv4'):
self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4')
else:
self.cloudforms_prefer_ipv4 = False
# Ansible related
try:
group_patterns = config.get('ansible', 'group_patterns')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
group_patterns = "[]"
self.group_patterns = eval(group_patterns)
# Cache related
try:
cache_path = os.path.expanduser(config.get('cache', 'path'))
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
cache_path = '.'
(script, ext) = os.path.splitext(os.path.basename(__file__))
self.cache_path_hosts = cache_path + "/%s.hosts" % script
self.cache_path_inventory = cache_path + "/%s.inventory" % script
self.cache_max_age = config.getint('cache', 'max_age')
if self.args.debug:
print("CloudForms settings:")
print("cloudforms_url = %s" % self.cloudforms_url)
print("cloudforms_username = %s" % self.cloudforms_username)
print("cloudforms_pw = %s" % self.cloudforms_pw)
print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify)
print("cloudforms_version = %s" % self.cloudforms_version)
print("cloudforms_limit = %s" % self.cloudforms_limit)
print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions)
print("Cache settings:")
print("cache_max_age = %s" % self.cache_max_age)
print("cache_path_hosts = %s" % self.cache_path_hosts)
print("cache_path_inventory = %s" % self.cache_path_inventory)
def parse_cli_args(self):
"""
Command line argument processing
"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)')
parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)')
self.args = parser.parse_args()
def _http_request(self, url):
"""
Make a request and return the result converted from JSON
"""
results = []
ret = requests.get(url,
auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw),
verify=self.cloudforms_ssl_verify)
ret.raise_for_status()
try:
results = json.loads(ret.text)
except ValueError:
warnings.warn(
"Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason))
results = {}
if self.args.debug:
print("=======================================================================")
print("=======================================================================")
print("=======================================================================")
print(ret.text)
print("=======================================================================")
print("=======================================================================")
print("=======================================================================")
return results
def _get_json(self, endpoint, url_suffix):
"""
        Make a request to the given endpoint, split it into paginated
        sub-requests according to the configured limit, and return the
        aggregated data received from CloudForms
:param endpoint: api endpoint to access
:param url_suffix: additional api parameters
"""
limit = int(self.cloudforms_limit)
page = 0
last_page = False
results = []
while not last_page:
offset = page * limit
url = "%s%s?offset=%s&limit=%s%s" % (
self.cloudforms_url, endpoint, offset, limit, url_suffix)
if self.args.debug:
print("Connecting to url '%s'" % url)
ret = self._http_request(url)
results += [ret]
if 'subcount' in ret:
if ret['subcount'] < limit:
last_page = True
page += 1
else:
last_page = True
return results
def _get_hosts(self):
"""
Get all hosts
"""
endpoint = "/api/vms"
url_suffix = "&expand=resources,tags,hosts,&attributes=active,ipaddresses&filter[]=active=true"
results = self._get_json(endpoint, url_suffix)
resources = [item for sublist in results for item in sublist['resources']]
return resources
def update_cache(self):
"""
Make calls to cloudforms and save the output in a cache
"""
self.groups = dict()
self.hosts = dict()
if self.args.debug:
print("Updating cache...")
for host in self._get_hosts():
if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix):
host['name'] = host['name'] + self.cloudforms_suffix
# Ignore VMs that are not powered on
if host['power_state'] != 'on':
if self.args.debug:
print("Skipping %s because power_state = %s" % (host['name'], host['power_state']))
continue
# purge actions
if self.cloudforms_purge_actions and 'actions' in host:
del host['actions']
# Create ansible groups for tags
if 'tags' in host:
# Create top-level group
if 'tags' not in self.inventory:
self.inventory['tags'] = dict(children=[], vars={}, hosts=[])
if not self.cloudforms_nest_tags:
# don't expand tags, just use them in a safe way
for group in host['tags']:
# Add sub-group, as a child of top-level
safe_key = self.to_safe(group['name'])
if safe_key:
if self.args.debug:
print("Adding sub-group '%s' to parent 'tags'" % safe_key)
if safe_key not in self.inventory['tags']['children']:
self.push(self.inventory['tags'], 'children', safe_key)
self.push(self.inventory, safe_key, host['name'])
if self.args.debug:
print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key))
else:
# expand the tags into nested groups / sub-groups
# Create nested groups for tags
safe_parent_tag_name = 'tags'
for tag in host['tags']:
tag_hierarchy = tag['name'][1:].split('/')
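                    # e.g. a tag named '/env/prod' yields the nested chain
                    # tags > env > prod, and the host is added to the
                    # innermost group ('prod').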
if self.args.debug:
print("Working on list %s" % tag_hierarchy)
for tag_name in tag_hierarchy:
if self.args.debug:
print("Working on tag_name = %s" % tag_name)
safe_tag_name = self.to_safe(tag_name)
if self.args.debug:
print("Using sanitized name %s" % safe_tag_name)
# Create sub-group
if safe_tag_name not in self.inventory:
self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[])
# Add sub-group, as a child of top-level
if safe_parent_tag_name:
if self.args.debug:
print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name))
if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']:
self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name)
                            # Make sure the next one uses this one as its parent
safe_parent_tag_name = safe_tag_name
# Add the host to the last tag
self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name'])
# Set ansible_ssh_host to the first available ip address
if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list):
# If no preference for IPv4, just use the first entry
if not self.cloudforms_prefer_ipv4:
host['ansible_ssh_host'] = host['ipaddresses'][0]
else:
# Before we search for an IPv4 address, set using the first entry in case we don't find any
host['ansible_ssh_host'] = host['ipaddresses'][0]
for currenthost in host['ipaddresses']:
if '.' in currenthost:
host['ansible_ssh_host'] = currenthost
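                        # Note: there is no break, so the last IPv4-looking
                        # (dotted) address in the list wins.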
# Create additional groups
for key in ('location', 'type', 'vendor'):
safe_key = self.to_safe(host[key])
# Create top-level group
if key not in self.inventory:
self.inventory[key] = dict(children=[], vars={}, hosts=[])
# Create sub-group
if safe_key not in self.inventory:
self.inventory[safe_key] = dict(children=[], vars={}, hosts=[])
# Add sub-group, as a child of top-level
if safe_key not in self.inventory[key]['children']:
self.push(self.inventory[key], 'children', safe_key)
if key in host:
# Add host to sub-group
self.push(self.inventory[safe_key], 'hosts', host['name'])
self.hosts[host['name']] = host
self.push(self.inventory, 'all', host['name'])
if self.args.debug:
print("Saving cached data")
self.write_to_cache(self.hosts, self.cache_path_hosts)
self.write_to_cache(self.inventory, self.cache_path_inventory)
def get_host_info(self, host):
"""
Get variables about a specific host
"""
        if not self.hosts:
            # Need to load hosts from the cache file
            self.load_hosts_from_cache()
if host not in self.hosts:
if self.args.debug:
print("[%s] not found in cache." % host)
# try updating the cache
self.update_cache()
if host not in self.hosts:
if self.args.debug:
print("[%s] does not exist after cache update." % host)
# host might not exist anymore
return self.json_format_dict({}, self.args.pretty)
return self.json_format_dict(self.hosts[host], self.args.pretty)
def push(self, d, k, v):
"""
        Safely appends a new entry to the list stored at the given key.
"""
if k in d:
d[k].append(v)
else:
d[k] = [v]
    def load_inventory_from_cache(self):
        """
        Reads the inventory from the cache file and sets self.inventory
        """
        with open(self.cache_path_inventory, 'r') as cache:
            self.inventory = json.loads(cache.read())
    def load_hosts_from_cache(self):
        """
        Reads the hosts from the cache file and sets self.hosts
        """
        with open(self.cache_path_hosts, 'r') as cache:
            self.hosts = json.loads(cache.read())
    def write_to_cache(self, data, filename):
        """
        Writes data in JSON format to a file
        """
        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)
def to_safe(self, word):
"""
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
"""
if self.cloudforms_clean_group_keys:
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
else:
return word
def json_format_dict(self, data, pretty=False):
"""
Converts a dict to a JSON object and dumps it as a formatted string
"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
CloudFormsInventory()

View file

@ -1,24 +0,0 @@
# Ansible Cobbler external inventory script settings
#
[cobbler]
host = http://PATH_TO_COBBLER_SERVER/cobbler_api
# If API needs authentication add 'username' and 'password' options here.
#username = foo
#password = bar
# API calls to Cobbler can be slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-cobbler.cache
# - ansible-cobbler.index
cache_path = /tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 900

View file

@ -1,305 +0,0 @@
#!/usr/bin/env python
"""
Cobbler external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is an example of sourcing that data from Cobbler
(https://cobbler.github.io). With Cobbler, each --mgmt-class will
correspond to a group in Ansible, and --ks-meta variables will be
passed down for use in templates or even in argument lines.
NOTE: The cobbler system names will not be used. Make sure a
cobbler --dns-name is set for each cobbler system. If a system
appears with two DNS names we do not add it twice because we don't want
ansible talking to it twice. The first one found will be used. If no
--dns-name is set the system will NOT be visible to ansible. We do
not add cobbler system names because there is no requirement in cobbler
that those correspond to addresses.
Tested with Cobbler 2.0.11.
Changelog:
- 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in
higher performance at ansible startup. Groups are determined by owner rather than
default mgmt_classes. DNS name determined from hostname. cobbler values are written
to a 'cobbler' fact namespace
- 2013-09-01 pgehres: Refactored implementation to make use of caching and to
limit the number of connections to external cobbler server for performance.
Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0
"""
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
import argparse
import os
import re
from time import time
try: # Python 3
from xmlrpc.client import Server
except ImportError: # Python 2
from xmlrpclib import Server
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
# server, so it does not attempt to login with a username and password.
# this will be addressed in a future version of this script.
orderby_keyname = 'owners' # alternatively 'mgmt_classes'
class CobblerInventory(object):
def __init__(self):
""" Main execution path """
self.conn = None
self.inventory = dict() # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
self.ignore_settings = False # used to only look at env vars for settings.
# Read env vars, read settings, and parse CLI arguments
self.parse_env_vars()
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.update_cache()
elif not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_cache_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]}
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def _connect(self):
if not self.conn:
self.conn = Server(self.cobbler_host, allow_none=True)
self.token = None
if self.cobbler_username is not None:
self.token = self.conn.login(self.cobbler_username, self.cobbler_password)
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
return True
return False
def read_settings(self):
""" Reads the settings from the cobbler.ini file """
        if self.ignore_settings:
return
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini')
self.cobbler_host = config.get('cobbler', 'host')
self.cobbler_username = None
self.cobbler_password = None
if config.has_option('cobbler', 'username'):
self.cobbler_username = config.get('cobbler', 'username')
if config.has_option('cobbler', 'password'):
self.cobbler_password = config.get('cobbler', 'password')
# Cache related
cache_path = config.get('cobbler', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
self.cache_max_age = config.getint('cobbler', 'cache_max_age')
def parse_env_vars(self):
""" Reads the settings from the environment """
# Env. Vars:
# COBBLER_host
# COBBLER_username
# COBBLER_password
# COBBLER_cache_path
# COBBLER_cache_max_age
# COBBLER_ignore_settings
self.cobbler_host = os.getenv('COBBLER_host', None)
self.cobbler_username = os.getenv('COBBLER_username', None)
self.cobbler_password = os.getenv('COBBLER_password', None)
# Cache related
cache_path = os.getenv('COBBLER_cache_path', None)
        if cache_path is not None:
self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30"))
        # ignore_settings is used to ignore the settings file, for use in Ansible
        # Tower (or AWX) inventory scripts, without throwing Python exceptions.
        if os.getenv('COBBLER_ignore_settings', False) == "True":
self.ignore_settings = True
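        # Illustrative usage (values are examples only):
        #   COBBLER_host=http://cobbler.example.com/cobbler_api \
        #   COBBLER_ignore_settings=True ./cobbler.py --list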
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)')
self.args = parser.parse_args()
def update_cache(self):
""" Make calls to cobbler and save the output in a cache """
self._connect()
self.groups = dict()
self.hosts = dict()
if self.token is not None:
data = self.conn.get_systems(self.token)
else:
data = self.conn.get_systems()
for host in data:
# Get the FQDN for the host and add it to the right groups
dns_name = host['hostname'] # None
ksmeta = None
interfaces = host['interfaces']
# hostname is often empty for non-static IP hosts
if dns_name == '':
for (iname, ivalue) in iteritems(interfaces):
if ivalue['management'] or not ivalue['static']:
this_dns_name = ivalue.get('dns_name', None)
dns_name = this_dns_name if this_dns_name else ''
if dns_name == '' or dns_name is None:
continue
status = host['status']
profile = host['profile']
classes = host[orderby_keyname]
if status not in self.inventory:
self.inventory[status] = []
self.inventory[status].append(dns_name)
if profile not in self.inventory:
self.inventory[profile] = []
self.inventory[profile].append(dns_name)
for cls in classes:
if cls not in self.inventory:
self.inventory[cls] = []
self.inventory[cls].append(dns_name)
# Since we already have all of the data for the host, update the host details as well
# The old way was ksmeta only -- provide backwards compatibility
self.cache[dns_name] = host
if "ks_meta" in host:
for key, value in iteritems(host["ks_meta"]):
self.cache[dns_name][key] = value
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
def get_host_info(self):
""" Get variables about a specific host """
        if not self.cache:
            # Need to load the cache from the cache file
            self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, True)
return self.json_format_dict(self.cache[self.args.host], True)
def push(self, my_dict, key, element):
""" Pushed an element onto an array that may not have been defined in the dict """
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
    def load_inventory_from_cache(self):
        """ Reads the inventory from the cache file and sets self.inventory """
        with open(self.cache_path_inventory, 'r') as cache:
            self.inventory = json.loads(cache.read())
    def load_cache_from_cache(self):
        """ Reads the cache from the cache file and sets self.cache """
        with open(self.cache_path_cache, 'r') as cache:
            self.cache = json.loads(cache.read())
    def write_to_cache(self, data, filename):
        """ Writes data in JSON format to a file """
        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)
def to_safe(self, word):
""" Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
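    # Illustrative: to_safe('web server #1') returns 'web_server__1'
    # (note that dashes are preserved by this script's pattern).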
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
CobblerInventory()

View file

@ -1,57 +0,0 @@
# Ansible Collins external inventory script settings
#
[collins]
# You should not have a trailing slash or Collins
# will not properly match the URI
host = http://localhost:9000
username = blake
password = admin:first
# Specifies a timeout for all HTTP requests to Collins.
timeout_secs = 120
# Specifies a maximum number of retries per Collins request.
max_retries = 5
# Specifies the number of results to return per paginated query as specified in
# the Pagination section of the Collins API docs:
# http://tumblr.github.io/collins/api.html
results_per_query = 100
# Specifies the Collins asset type which will be queried for; most typically
# you'll want to leave this at the default of SERVER_NODE.
asset_type = SERVER_NODE
# Collins assets can optionally be assigned hostnames; this option prefers
# an asset's hostname over an IP address as the primary identifier
# in the Ansible inventory. Typically, this value should be set to true if assets
# are assigned hostnames.
prefer_hostnames = true
# Within Collins, assets can be granted multiple IP addresses; this configuration
# value specifies the index within the 'ADDRESSES' array as returned by the
# following API endpoint:
# http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section
ip_address_index = 0
# Sets whether Collins instances in multiple datacenters will be queried.
query_remote_dcs = false
# API calls to Collins can involve large queries. For this reason,
# we cache the results of an API call. Set this to the path you want cache files
# to be written to. Two files will be written to this directory:
# - ansible-collins.cache
# - ansible-collins.index
cache_path = /tmp
# If errors occur while querying inventory, logging messages will be written
# to a logfile in the specified directory:
# - ansible-collins.log
log_path = /tmp
# The number of seconds that a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 600

View file

@ -1,429 +0,0 @@
#!/usr/bin/env python
"""
Collins external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
Collins is a hardware asset management system originally developed by
Tumblr for tracking new hardware as it built out its own datacenters. It
exposes a rich API for manipulating and querying one's hardware inventory,
which makes it an ideal 'single point of truth' for driving systems
automation like Ansible. Extensive documentation on Collins, including a quickstart,
API docs, and a full reference manual, can be found here:
http://tumblr.github.io/collins
This script adds support to Ansible for obtaining a dynamic inventory of
assets in your infrastructure, grouping them in Ansible by their useful attributes,
and binding all facts provided by Collins to each host so that they can be used to
drive automation. Some parts of this script were cribbed shamelessly from mdehaan's
Cobbler inventory script.
To use it, copy it to your repo and pass -i <collins script> to the ansible or
ansible-playbook command; if you'd like to use it by default, simply copy collins.ini
to /etc/ansible and this script to /etc/ansible/hosts.
Alongside the options set in collins.ini, there are several environment variables
that will be used instead of the configured values if they are set:
- COLLINS_USERNAME - specifies a username to use for Collins authentication
- COLLINS_PASSWORD - specifies a password to use for Collins authentication
- COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying;
this can be used to run Ansible automation against different asset classes than
server nodes, such as network switches and PDUs
- COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to
<location of collins.py>/collins.ini
If errors are encountered during operation, this script will return an exit code of
255; otherwise, it will return an exit code of 0.
Collins attributes are accessible as variables in Ansible via COLLINS['attribute_name'].
Tested against Ansible 1.8.2 and Collins 1.3.0.
"""
# (c) 2014, Steve Salevan <steve.salevan@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
import argparse
import logging
import os
import re
import sys
from time import time
import traceback
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
class CollinsDefaults(object):
ASSETS_API_ENDPOINT = '%s/api/assets'
SPECIAL_ATTRIBUTES = set([
'CREATED',
'DELETED',
'UPDATED',
'STATE',
])
LOG_FORMAT = '%(asctime)-15s %(message)s'
class Error(Exception):
pass
class MaxRetriesError(Error):
pass
class CollinsInventory(object):
def __init__(self):
""" Constructs CollinsInventory object and reads all configuration. """
self.inventory = dict() # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
filename=self.log_location)
self.log = logging.getLogger('CollinsInventory')
def _asset_get_attribute(self, asset, attrib):
""" Returns a user-defined attribute from an asset if it exists; otherwise,
returns None. """
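        # Illustrative ATTRIBS layout on a hypothetical asset:
        #   {'ATTRIBS': {'0': {'HOSTNAME': 'web01', 'ANSIBLE_IP_INDEX': '1'}}}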
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
if attrib in asset['ATTRIBS'][attrib_block]:
return asset['ATTRIBS'][attrib_block][attrib]
return None
def _asset_has_attribute(self, asset, attrib):
""" Returns whether a user-defined attribute is present on an asset. """
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
if attrib in asset['ATTRIBS'][attrib_block]:
return True
return False
def run(self):
""" Main execution path """
# Updates cache if cache is not present or has expired.
successful = True
if self.args.refresh_cache:
successful = self.update_cache()
elif not self.is_cache_valid():
successful = self.update_cache()
else:
successful = self.load_inventory_from_cache()
successful &= self.load_cache_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
else: # default action with no options
data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
print(data_to_print)
return successful
def find_assets(self, attributes=None, operation='AND'):
""" Obtains Collins assets matching the provided attributes. """
attributes = {} if attributes is None else attributes
# Formats asset search query to locate assets matching attributes, using
# the CQL search feature as described here:
# http://tumblr.github.io/collins/recipes.html
attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
query_parameters = {
'details': ['True'],
'operation': [operation],
'query': attributes_query,
'remoteLookup': [str(self.query_remote_dcs)],
'size': [self.results_per_query],
'type': [self.collins_asset_type],
}
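        # Illustrative resulting query string (hypothetical values; key order
        # may vary): details=True&operation=AND&query=HOSTNAME%3Dweb01
        #   &remoteLookup=False&size=100&type=SERVER_NODE&page=0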
assets = []
cur_page = 0
num_retries = 0
# Locates all assets matching the provided query, exhausting pagination.
while True:
if num_retries == self.collins_max_retries:
raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
query_parameters['page'] = cur_page
query_url = "%s?%s" % (
(CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
urlencode(query_parameters, doseq=True)
)
try:
response = open_url(query_url,
timeout=self.collins_timeout_secs,
url_username=self.collins_username,
url_password=self.collins_password,
force_basic_auth=True)
json_response = json.loads(response.read())
# Adds any assets found to the array of assets.
assets += json_response['data']['Data']
                # If we've retrieved all of our assets, break out of the loop.
if len(json_response['data']['Data']) == 0:
break
cur_page += 1
num_retries = 0
except Exception:
self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
num_retries += 1
return assets
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
return True
return False
def read_settings(self):
""" Reads the settings from the collins.ini file """
config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
config = ConfigParser.SafeConfigParser()
        config.read(config_loc)  # honor COLLINS_CONFIG if set, as documented above
self.collins_host = config.get('collins', 'host')
self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
self.collins_max_retries = config.getint('collins', 'max_retries')
self.results_per_query = config.getint('collins', 'results_per_query')
self.ip_address_index = config.getint('collins', 'ip_address_index')
self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
cache_path = config.get('collins', 'cache_path')
self.cache_path_cache = cache_path + \
'/ansible-collins-%s.cache' % self.collins_asset_type
self.cache_path_inventory = cache_path + \
'/ansible-collins-%s.index' % self.collins_asset_type
self.cache_max_age = config.getint('collins', 'cache_max_age')
log_path = config.get('collins', 'log_path')
self.log_location = log_path + '/ansible-collins.log'
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(
description='Produces an Ansible Inventory file based on Collins')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Collins '
'(default: False - use cache files)')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
self.args = parser.parse_args()
def update_cache(self):
""" Make calls to Collins and saves the output in a cache """
self.cache = dict()
self.inventory = dict()
# Locates all server assets from Collins.
try:
server_assets = self.find_assets()
except Exception:
self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
return False
for asset in server_assets:
# Determines the index to retrieve the asset's IP address either by an
# attribute set on the Collins asset or the pre-configured value.
if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
try:
ip_index = int(ip_index)
except Exception:
self.log.error(
"ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
ip_index)
else:
ip_index = self.ip_address_index
asset['COLLINS'] = {}
# Attempts to locate the asset's primary identifier (hostname or IP address),
# which will be used to index the asset throughout the Ansible inventory.
if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
elif 'ADDRESSES' not in asset:
self.log.warning("No IP addresses found for asset '%s', skipping", asset)
continue
elif len(asset['ADDRESSES']) < ip_index + 1:
self.log.warning(
"No IP address found at index %s for asset '%s', skipping",
ip_index, asset)
continue
else:
asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']
# Adds an asset index to the Ansible inventory based upon unpacking
# the name of the asset's current STATE from its dictionary.
if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
state_inventory_key = self.to_safe(
'STATE-%s' % asset['ASSET']['STATE']['NAME'])
self.push(self.inventory, state_inventory_key, asset_identifier)
# Indexes asset by all user-defined Collins attributes.
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
for attrib in asset['ATTRIBS'][attrib_block].keys():
asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
self.push(self.inventory, attrib_key, asset_identifier)
# Indexes asset by all built-in Collins attributes.
for attribute in asset['ASSET'].keys():
if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
attribute_val = asset['ASSET'][attribute]
if attribute_val is not None:
attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
self.push(self.inventory, attrib_key, asset_identifier)
# Indexes asset by hardware product information.
if 'HARDWARE' in asset:
if 'PRODUCT' in asset['HARDWARE']['BASE']:
product = asset['HARDWARE']['BASE']['PRODUCT']
if product:
product_key = self.to_safe(
'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
self.push(self.inventory, product_key, asset_identifier)
# Indexing now complete, adds the host details to the asset cache.
self.cache[asset_identifier] = asset
try:
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
except Exception:
self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
return False
return True
def push(self, dictionary, key, value):
""" Adds a value to a list at a dictionary key, creating the list if it doesn't
exist. """
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(value)
def get_host_info(self):
""" Get variables about a specific host. """
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, self.args.pretty)
return self.json_format_dict(self.cache[self.args.host], self.args.pretty)
def load_inventory_from_cache(self):
""" Reads the index from the cache file sets self.index """
try:
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
return True
except Exception:
self.log.error("Error while loading inventory:\n%s",
traceback.format_exc())
self.inventory = {}
return False
def load_cache_from_cache(self):
""" Reads the cache from the cache file sets self.cache """
try:
cache = open(self.cache_path_cache, 'r')
json_cache = cache.read()
self.cache = json.loads(json_cache)
return True
except Exception:
self.log.error("Error while loading host cache:\n%s",
traceback.format_exc())
self.cache = {}
return False
def write_to_cache(self, data, filename):
""" Writes data in JSON format to a specified file. """
json_data = self.json_format_dict(data, self.args.pretty)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
""" Converts 'bad' characters in a string to underscores so they
can be used as Ansible groups """
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
if __name__ == '__main__':
inventory = CollinsInventory()
if inventory.run():
sys.exit(0)
else:
sys.exit(-1)

View file

@ -1,54 +0,0 @@
# Ansible Consul external inventory script settings.
[consul]
#
# Bulk load. Load all possible data before building inventory JSON
# If true, the script processes in-memory data, which reduces JSON generation time drastically.
#
bulk_load = false
# restrict included nodes to those from this datacenter
#datacenter = nyc1
# url of the consul cluster to query
#url = http://demo.consul.io
url = http://localhost:8500
# suffix added to each service to create a group name, e.g. a service of 'redis' and
# a suffix of '_servers' will add each address to the group name 'redis_servers'
servers_suffix = _servers
#
# By default, final JSON is built based on all available info in consul.
# Enabling suffixes means that services groups will be added in addition to basic information. See servers_suffix for additional info.
# There are cases when speed is preferable to having services groups;
# a false value will reduce script execution time drastically.
#
suffixes = true
# if specified then the inventory will generate domain names that will resolve
# via Consul's inbuilt DNS.
#domain=consul
# make groups from service tags. the name of the group is derived from the
# service name and the tag name e.g. a service named nginx with tags ['master', 'v1']
# will create groups nginx_master and nginx_v1
tags = true
# looks up the node name at the given path for a list of groups to which the
# node should be added.
kv_groups=ansible/groups
# looks up the node name at the given path for a json dictionary of metadata that
# should be attached as metadata for the node
kv_metadata=ansible/metadata
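#
# Illustrative KV layout for the two settings above (names are examples):
# with kv_groups=ansible/groups, a node 'web-1' in datacenter 'dc1' is looked
# up under the key 'ansible/groups/dc1/web-1', whose value should be a comma
# separated group list such as 'webservers,production'. Likewise,
# kv_metadata=ansible/metadata expects 'ansible/metadata/dc1/web-1' to hold
# a JSON dict such as {"database": "postgres"}.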
# looks up the health of each service and adds the node to 'up' and 'down' groups
# based on the service availability
#
# !!!! if availability is true, suffixes also must be true. !!!!
#
availability = true
available_suffix = _up
unavailable_suffix = _down

View file

@ -1,553 +0,0 @@
#!/usr/bin/env python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
'''
Consul.io inventory script (http://consul.io)
======================================
Generates Ansible inventory from nodes in a Consul cluster. This script will
group nodes by:
- datacenter,
- registered service
- service tags
- service status
- values from the k/v store
This script can be run with the switches
--list as expected groups all the nodes in all datacenters
--datacenter, to restrict the nodes to a single datacenter
--host to restrict the inventory to a single named node. (requires datacenter config)
The configuration for this plugin is read from a consul_io.ini file located in the
same directory as this inventory script or via environment variables. All config options in the config file
are optional except the host and port, which must point to a valid agent or
server running the http api. For more information on enabling the endpoint see:
http://www.consul.io/docs/agent/options.html
Other options include:
'bulk_load'
boolean flag. Load all possible data before building inventory JSON
If true, the script processes in-memory data, which reduces JSON generation time drastically.
This can also be set with the environmental variable CONSUL_BULK_LOAD.
'datacenter':
which restricts the included nodes to those from the given datacenter
This can also be set with the environmental variable CONSUL_DATACENTER.
'url':
the URL of the Consul cluster. host, port and scheme are derived from the
URL. If not specified, connection configuration defaults to http requests
to localhost on port 8500.
This can also be set with the environmental variable CONSUL_URL.
'domain':
if specified then the inventory will generate domain names that will resolve
via Consul's inbuilt DNS. The name is derived from the node name, datacenter
and domain <node_name>.node.<datacenter>.<domain>. Note that you will need to
have consul hooked into your DNS server for these to resolve. See the consul
DNS docs for more info.
This can also be set with the environmental variable CONSUL_DOMAIN.
'suffixes':
boolean flag. By default, final JSON is built based on all available info in consul.
Enabling suffixes means that services groups will be added in addition to basic information. See servers_suffix for additional info.
There are cases when speed is preferable to having services groups;
a false value will reduce script execution time drastically.
This can also be set with the environmental variable CONSUL_SUFFIXES.
'servers_suffix':
defining the suffix to add to the service name when creating the service
group. e.g. a service name of 'redis' and a suffix of '_servers' will add
each node's address to the group name 'redis_servers'. No suffix is added
if this is not set
This can also be set with the environmental variable CONSUL_SERVERS_SUFFIX.
'tags':
boolean flag defining if service tags should be used to create Inventory
groups e.g. an nginx service with the tags ['master', 'v1'] will create
groups nginx_master and nginx_v1 to which the node running the service
will be added. No tag groups are created if this is missing.
This can also be set with the environmental variable CONSUL_TAGS.
'token':
ACL token to use to authorize access to the key value store. May be required
to retrieve the kv_groups and kv_metadata based on your consul configuration.
This can also be set with the environmental variable CONSUL_TOKEN.
'kv_groups':
This is used to lookup groups for a node in the key value store. It specifies a
path to which each discovered node's name will be added to create a key to query
the key/value store. There it expects to find a comma separated list of group
names to which the node should be added e.g. if the inventory contains node
'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key
'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query
returned 'test,honeypot' then the node address would be added to both groups.
This can also be set with the environmental variable CONSUL_KV_GROUPS.
'kv_metadata':
kv_metadata is used to lookup metadata for each discovered node. Like kv_groups
above it is used to build a path to lookup in the kv store where it expects to
find a json dictionary of metadata entries. If found, each key/value pair in the
dictionary is added to the metadata for the node. e.g. node 'nyc-web-1' in datacenter
'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key
'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"database": "postgres"}'
This can also be set with the environmental variable CONSUL_KV_METADATA.
'availability':
if true then availability groups will be created for each service. The node will
be added to one of the groups based on the health status of the service. The
group name is derived from the service name and the configurable availability
suffixes
This can also be set with the environmental variable CONSUL_AVAILABILITY.
'available_suffix':
suffix that should be appended to the service availability groups for available
services e.g. if the suffix is '_up' and the service is nginx, then nodes with
healthy nginx services will be added to the nginx_up group. Defaults to
'_available'
This can also be set with the environmental variable CONSUL_AVAILABLE_SUFFIX.
'unavailable_suffix':
as above but for unhealthy services, defaults to '_unavailable'
This can also be set with the environmental variable CONSUL_UNAVAILABLE_SUFFIX.
Note that if the inventory discovers an 'ssh' service running on a node it will
register the port as ansible_ssh_port in the node's metadata and this port will
be used to access the machine.
'''
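# Illustrative output shape for --list (assuming a single node 10.0.0.1 in
# datacenter 'dc1' running a 'redis' service tagged 'master', with
# servers_suffix = '_servers' and tags enabled), roughly:
#   {
#     "_meta": {"hostvars": {"10.0.0.1": {"consul_datacenter": "dc1",
#                                         "consul_nodename": "node1"}}},
#     "all": ["10.0.0.1"],
#     "dc1": ["10.0.0.1"],
#     "redis_servers": ["10.0.0.1"],
#     "redis_servers_master": ["10.0.0.1"]
#   }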
import os
import re
import argparse
import sys
from ansible.module_utils.six.moves import configparser
def get_log_filename():
tty_filename = '/dev/tty'
stdout_filename = '/dev/stdout'
if not os.path.exists(tty_filename):
return stdout_filename
if not os.access(tty_filename, os.W_OK):
return stdout_filename
if os.getenv('TEAMCITY_VERSION'):
return stdout_filename
return tty_filename
def setup_logging():
filename = get_log_filename()
import logging.config
logging.config.dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
},
},
'root': {
'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'),
'handlers': ['console'],
},
'handlers': {
'console': {
'class': 'logging.FileHandler',
'filename': filename,
'formatter': 'simple',
},
},
'loggers': {
'iso8601': {
'qualname': 'iso8601',
'level': 'INFO',
},
},
})
logger = logging.getLogger('consul_io.py')
logger.debug('Invoked with %r', sys.argv)
if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'):
setup_logging()
import json
try:
import consul
except ImportError as e:
sys.exit("""failed=True msg='python-consul required for this module.
See https://python-consul.readthedocs.io/en/latest/#installation'""")
from ansible.module_utils.six import iteritems
class ConsulInventory(object):
def __init__(self):
''' Create an inventory based on the catalog of nodes and services
registered in a consul cluster'''
self.node_metadata = {}
self.nodes = {}
self.nodes_by_service = {}
self.nodes_by_tag = {}
self.nodes_by_datacenter = {}
self.nodes_by_kv = {}
self.nodes_by_availability = {}
self.current_dc = None
self.inmemory_kv = []
self.inmemory_nodes = []
config = ConsulConfig()
self.config = config
self.consul_api = config.get_consul_api()
if config.has_config('datacenter'):
if config.has_config('host'):
self.load_data_for_node(config.host, config.datacenter)
else:
self.load_data_for_datacenter(config.datacenter)
else:
self.load_all_data_consul()
self.combine_all_results()
print(json.dumps(self.inventory, sort_keys=True, indent=2))
def bulk_load(self, datacenter):
index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter)
index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter)
index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
self.inmemory_kv += groups_list
self.inmemory_kv += metadata_list
self.inmemory_nodes += nodes
def load_all_data_consul(self):
''' cycle through each of the datacenters in the consul catalog and process
the nodes in each '''
self.datacenters = self.consul_api.catalog.datacenters()
for datacenter in self.datacenters:
self.current_dc = datacenter
self.bulk_load(datacenter)
self.load_data_for_datacenter(datacenter)
def load_availability_groups(self, node, datacenter):
'''check the health of each service on a node and add the node to either
an 'available' or 'unavailable' grouping. The suffix for each group can be
controlled from the config'''
if self.config.has_config('availability'):
for service_name, service in iteritems(node['Services']):
for node in self.consul_api.health.service(service_name)[1]:
if self.is_service_available(node, service_name):
suffix = self.config.get_availability_suffix(
'available_suffix', '_available')
else:
suffix = self.config.get_availability_suffix(
'unavailable_suffix', '_unavailable')
self.add_node_to_map(self.nodes_by_availability,
service_name + suffix, node['Node'])
def is_service_available(self, node, service_name):
        '''check the availability of the service on the node, besides ensuring the
        availability of the node itself'''
consul_ok = service_ok = False
for check in node['Checks']:
if check['CheckID'] == 'serfHealth':
consul_ok = check['Status'] == 'passing'
elif check['ServiceName'] == service_name:
service_ok = check['Status'] == 'passing'
return consul_ok and service_ok
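    # Illustrative shape of the health entries consumed above (not the full
    # Consul payload); each entry from health.service() looks roughly like:
    #   {'Node': {...}, 'Checks': [
    #       {'CheckID': 'serfHealth', 'ServiceName': '', 'Status': 'passing'},
    #       {'CheckID': 'service:nginx', 'ServiceName': 'nginx', 'Status': 'passing'}]}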
    def consul_get_kv_inmemory(self, key):
        # list() is needed so this also works on Python 3, where filter()
        # returns an iterator that has no pop()
        result = list(filter(lambda x: x['Key'] == key, self.inmemory_kv))
        return result.pop() if result else None
    def consul_get_node_inmemory(self, node):
        result = list(filter(lambda x: x['Node'] == node, self.inmemory_nodes))
        return {"Node": result.pop(), "Services": {}} if result else None
def load_data_for_datacenter(self, datacenter):
'''processes all the nodes in a particular datacenter'''
if self.config.bulk_load == 'true':
nodes = self.inmemory_nodes
else:
index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
for node in nodes:
self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
self.load_data_for_node(node['Node'], datacenter)
def load_data_for_node(self, node, datacenter):
'''loads the data for a single node adding it to various groups based on
metadata retrieved from the kv store and service availability'''
if self.config.suffixes == 'true':
index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
else:
node_data = self.consul_get_node_inmemory(node)
node = node_data['Node']
self.add_node_to_map(self.nodes, 'all', node)
self.add_metadata(node_data, "consul_datacenter", datacenter)
self.add_metadata(node_data, "consul_nodename", node['Node'])
self.load_groups_from_kv(node_data)
self.load_node_metadata_from_kv(node_data)
if self.config.suffixes == 'true':
self.load_availability_groups(node_data, datacenter)
for name, service in node_data['Services'].items():
self.load_data_from_service(name, service, node_data)
def load_node_metadata_from_kv(self, node_data):
        ''' load the json dict at the metadata path defined by the kv_metadata value
        and the node name, and add each entry in the dictionary to the node's
        metadata '''
node = node_data['Node']
if self.config.has_config('kv_metadata'):
key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
if self.config.bulk_load == 'true':
metadata = self.consul_get_kv_inmemory(key)
else:
index, metadata = self.consul_api.kv.get(key)
if metadata and metadata['Value']:
try:
metadata = json.loads(metadata['Value'])
for k, v in metadata.items():
self.add_metadata(node_data, k, v)
except Exception:
pass
def load_groups_from_kv(self, node_data):
        ''' load the comma separated list of groups at the path defined by the
        kv_groups config value and the node name, and add the node address to
        each group found '''
node = node_data['Node']
if self.config.has_config('kv_groups'):
key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
if self.config.bulk_load == 'true':
groups = self.consul_get_kv_inmemory(key)
else:
index, groups = self.consul_api.kv.get(key)
if groups and groups['Value']:
for group in groups['Value'].decode().split(','):
self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
def load_data_from_service(self, service_name, service, node_data):
'''process a service registered on a node, adding the node to a group with
the service name. Each service tag is extracted and the node is added to a
tag grouping also'''
self.add_metadata(node_data, "consul_services", service_name, True)
if self.is_service("ssh", service_name):
self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
if self.config.has_config('servers_suffix'):
service_name = service_name + self.config.servers_suffix
self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
self.extract_groups_from_tags(service_name, service, node_data)
def is_service(self, target, name):
return name and (name.lower() == target.lower())
def extract_groups_from_tags(self, service_name, service, node_data):
'''iterates each service tag and adds the node to groups derived from the
service and tag names e.g. nginx_master'''
if self.config.has_config('tags') and service['Tags']:
tags = service['Tags']
self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
for tag in service['Tags']:
tagname = service_name + '_' + tag
self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
def combine_all_results(self):
'''prunes and sorts all groupings for combination into the final map'''
self.inventory = {"_meta": {"hostvars": self.node_metadata}}
groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
for grouping in groupings:
for name, addresses in grouping.items():
self.inventory[name] = sorted(list(set(addresses)))
def add_metadata(self, node_data, key, value, is_list=False):
        ''' Pushes an element onto a metadata dict for the node, creating
        the dict if it doesn't exist '''
key = self.to_safe(key)
node = self.get_inventory_name(node_data['Node'])
if node in self.node_metadata:
metadata = self.node_metadata[node]
else:
metadata = {}
self.node_metadata[node] = metadata
if is_list:
self.push(metadata, key, value)
else:
metadata[key] = value
def get_inventory_name(self, node_data):
'''return the ip or a node name that can be looked up in consul's dns'''
domain = self.config.domain
if domain:
node_name = node_data['Node']
if self.current_dc:
return '%s.node.%s.%s' % (node_name, self.current_dc, domain)
else:
return '%s.node.%s' % (node_name, domain)
else:
return node_data['Address']
def add_node_to_map(self, map, name, node):
self.push(map, name, self.get_inventory_name(node))
def push(self, my_dict, key, element):
        ''' Pushes an element onto an array that may not have been defined in the
        dict '''
key = self.to_safe(key)
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used
as Ansible groups '''
return re.sub(r'[^A-Za-z0-9\-\.]', '_', word)
def sanitize_dict(self, d):
new_dict = {}
for k, v in d.items():
if v is not None:
new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
return new_dict
def sanitize_list(self, seq):
new_seq = []
for d in seq:
new_seq.append(self.sanitize_dict(d))
return new_seq
class ConsulConfig(dict):
def __init__(self):
self.read_settings()
self.read_cli_args()
self.read_env_vars()
def has_config(self, name):
if hasattr(self, name):
return getattr(self, name)
else:
return False
def read_settings(self):
''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)'''
config = configparser.SafeConfigParser()
if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'):
config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini')
else:
config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
config_options = ['host', 'token', 'datacenter', 'servers_suffix',
'tags', 'kv_metadata', 'kv_groups', 'availability',
'unavailable_suffix', 'available_suffix', 'url',
'domain', 'suffixes', 'bulk_load']
for option in config_options:
value = None
if config.has_option('consul', option):
value = config.get('consul', option).lower()
setattr(self, option, value)
def read_cli_args(self):
''' Command line argument processing '''
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on nodes in a Consul cluster')
parser.add_argument('--list', action='store_true',
help='Get all inventory variables from all nodes in the consul cluster')
parser.add_argument('--host', action='store',
                            help='Get all inventory variables about a specific consul node, '
                                 'requires datacenter set in consul.ini.')
parser.add_argument('--datacenter', action='store',
help='Get all inventory about a specific consul datacenter')
args = parser.parse_args()
arg_names = ['host', 'datacenter']
for arg in arg_names:
if getattr(args, arg):
setattr(self, arg, getattr(args, arg))
def read_env_vars(self):
env_var_options = ['host', 'token', 'datacenter', 'servers_suffix',
'tags', 'kv_metadata', 'kv_groups', 'availability',
'unavailable_suffix', 'available_suffix', 'url',
'domain', 'suffixes', 'bulk_load']
for option in env_var_options:
value = None
env_var = 'CONSUL_' + option.upper()
if os.environ.get(env_var):
setattr(self, option, os.environ.get(env_var))
def get_availability_suffix(self, suffix, default):
if self.has_config(suffix):
return self.has_config(suffix)
return default
def get_consul_api(self):
'''get an instance of the api based on the supplied configuration'''
host = 'localhost'
port = 8500
token = None
scheme = 'http'
if hasattr(self, 'url'):
from ansible.module_utils.six.moves.urllib.parse import urlparse
o = urlparse(self.url)
if o.hostname:
host = o.hostname
if o.port:
port = o.port
if o.scheme:
scheme = o.scheme
if hasattr(self, 'token'):
token = self.token
if not token:
token = 'anonymous'
return consul.Consul(host=host, port=port, token=token, scheme=scheme)
ConsulInventory()

View file

@ -1,892 +0,0 @@
#!/usr/bin/env python
#
# (c) 2016 Paul Durivage <paul.durivage@gmail.com>
# Chris Houseknecht <house@redhat.com>
# James Tanner <jtanner@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
Docker Inventory Script
=======================
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
because the inventory is generated at run-time rather than being read from a static file. The script generates the
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
script contacts can be defined using environment variables or a configuration file.
Requirements
------------
Using the docker modules requires having docker-py <https://docker-py.readthedocs.io/en/stable/>
installed on the host running Ansible. To install docker-py:
pip install docker-py
Run for Specific Host
---------------------
When run for a specific container using the --host option this script returns the following hostvars:
{
"ansible_ssh_host": "",
"ansible_ssh_port": 0,
"docker_apparmorprofile": "",
"docker_args": [],
"docker_config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/hello"
],
"Domainname": "",
"Entrypoint": null,
"Env": null,
"Hostname": "9f2f80b0a702",
"Image": "hello-world",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": null,
"WorkingDir": ""
},
"docker_created": "2016-04-18T02:05:59.659599249Z",
"docker_driver": "aufs",
"docker_execdriver": "native-0.2",
"docker_execids": null,
"docker_graphdriver": {
"Data": null,
"Name": "aufs"
},
"docker_hostconfig": {
"Binds": null,
"BlkioWeight": 0,
"CapAdd": null,
"CapDrop": null,
"CgroupParent": "",
"ConsoleSize": [
0,
0
],
"ContainerIDFile": "",
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuShares": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": null,
"Dns": null,
"DnsOptions": null,
"DnsSearch": null,
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"KernelMemory": 0,
"Links": null,
"LogConfig": {
"Config": {},
"Type": "json-file"
},
"LxcConf": null,
"Memory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"NetworkMode": "default",
"OomKillDisable": false,
"PidMode": "host",
"PortBindings": null,
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"RestartPolicy": {
"MaximumRetryCount": 0,
"Name": ""
},
"SecurityOpt": [
"label:disable"
],
"UTSMode": "",
"Ulimits": null,
"VolumeDriver": "",
"VolumesFrom": null
},
"docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname",
"docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts",
"docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14",
"docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7",
"docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log",
"docker_mountlabel": "",
"docker_mounts": [],
"docker_name": "/hello-world",
"docker_networksettings": {
"Bridge": "",
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"HairpinMode": false,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"MacAddress": "",
"Networks": {
"bridge": {
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": ""
}
},
"Ports": null,
"SandboxID": "",
"SandboxKey": "",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null
},
"docker_path": "/hello",
"docker_processlabel": "",
"docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf",
"docker_restartcount": 0,
"docker_short_id": "9f2f80b0a7023",
"docker_state": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2016-04-18T02:06:00.296619369Z",
"OOMKilled": false,
"Paused": false,
"Pid": 0,
"Restarting": false,
"Running": false,
"StartedAt": "2016-04-18T02:06:00.272065041Z",
"Status": "exited"
}
}
Groups
------
When run in --list mode (the default), container instances are grouped by:
- container id
- container name
- container short id
- image_name (image_<image name>)
- stack_name (stack_<stack name>)
- service_name (service_<service name>)
- docker_host
- running
- stopped
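Illustrative --list excerpt (assuming a running container 'web1' created
from the 'nginx' image on the default local daemon):
    {
        "web1": ["web1"],
        "image_nginx": ["web1"],
        "running": ["web1"],
        "docker_hosts": ["unix://var/run/docker.sock"]
    }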
Configuration:
--------------
You can control the behavior of the inventory script by passing arguments, defining environment variables, or
creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
is command line args, then the docker.yml file and finally environment variables.
Environment variables:
......................
To connect to a single Docker API the following variables can be defined in the environment to control the connection
options. These are the same environment variables used by the Docker modules.
DOCKER_HOST
The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
DOCKER_API_VERSION:
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
DOCKER_TIMEOUT:
    The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
DOCKER_TLS:
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
Defaults to False.
DOCKER_TLS_VERIFY:
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
    Defaults to False.
DOCKER_TLS_HOSTNAME:
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
to localhost.
DOCKER_CERT_PATH:
Path to the directory containing the client certificate, client key and CA certificate.
DOCKER_SSL_VERSION:
Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
was 1.0
In addition to the connection variables there are a couple variables used to control the execution and output of the
script:
DOCKER_CONFIG_FILE
Path to the configuration file. Defaults to ./docker.yml.
DOCKER_DEFAULT_SSH_PORT:
The private port (container port) on which SSH is listening for connections. Defaults to 22.
DOCKER_DEFAULT_IP:
The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
Configuration File
..................
Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
The default name of the file is derived from the name of the inventory script. By default the script will look for
the basename of the script (i.e. docker) with an extension of '.yml'.
You can also override the default config file name by defining DOCKER_CONFIG_FILE in the environment.
Here's what you can define in docker.yml:
defaults
Defines a default connection. Defaults will be taken from this and applied to any values not provided
for a host defined in the hosts list.
hosts
If you wish to get inventory from more than one Docker host, define a hosts list.
For the default host and each host in the hosts list define the following attributes:
host:
description: The URL or Unix socket path used to connect to the Docker API.
required: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
default: false
required: false
tls_verify:
        description: Connect using TLS and verify the authenticity of the Docker host server.
default: false
required: false
cert_path:
description: Path to the client's TLS certificate file.
default: null
required: false
cacert_path:
description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
default: null
required: false
key_path:
description: Path to the client's TLS key file.
default: null
required: false
version:
description: The Docker API version.
required: false
default: will be supplied by the docker-py module.
timeout:
description: The amount of time in seconds to wait on an API response.
required: false
default: 60
default_ip:
description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
'0.0.0.0'.
required: false
default: 127.0.0.1
private_ssh_port:
description: The port containers use for SSH
required: false
default: 22
Examples
--------
# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
# Simple playbook to invoke with the above example:
- name: Test docker_inventory
hosts: all
connection: local
gather_facts: no
tasks:
- debug: msg="Container - {{ inventory_hostname }}"
'''
import os
import sys
import json
import argparse
import re
import yaml
from collections import defaultdict
# Manipulation of the path is needed because the docker-py
# module is imported by the name docker, and because this file
# is also named docker
for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
try:
del sys.path[sys.path.index(path)]
except Exception:
pass
HAS_DOCKER_PY = True
HAS_DOCKER_ERROR = False
try:
from docker.errors import APIError, TLSParameterError
from docker.tls import TLSConfig
from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
# Client has recently been split into DockerClient and APIClient
try:
from docker import Client
except ImportError as dummy:
try:
from docker import APIClient as Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
class Client:
pass
DEFAULT_DOCKER_CONFIG_FILE = os.path.splitext(os.path.basename(__file__))[0] + '.yml'
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = "localhost"
DEFAULT_IP = '127.0.0.1'
DEFAULT_SSH_PORT = '22'
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
DOCKER_ENV_ARGS = dict(
config_file='DOCKER_CONFIG_FILE',
docker_host='DOCKER_HOST',
api_version='DOCKER_API_VERSION',
cert_path='DOCKER_CERT_PATH',
ssl_version='DOCKER_SSL_VERSION',
tls='DOCKER_TLS',
tls_verify='DOCKER_TLS_VERIFY',
tls_hostname='DOCKER_TLS_HOSTNAME',
timeout='DOCKER_TIMEOUT',
private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
default_ip='DOCKER_DEFAULT_IP',
)
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
def log(msg, pretty_print=False):
if pretty_print:
print(json.dumps(msg, sort_keys=True, indent=2))
else:
print(msg + u'\n')
class AnsibleDockerClient(Client):
def __init__(self, auth_params, debug):
self.auth_params = auth_params
self.debug = debug
self._connect_params = self._get_connect_params()
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
def fail(self, msg):
fail(msg)
def log(self, msg, pretty_print=False):
if self.debug:
log(msg, pretty_print)
def _get_tls_config(self, **kwargs):
self.log("get_tls_config:")
for key in kwargs:
self.log(" %s: %s" % (key, kwargs[key]))
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
self.fail("TLS config error: %s" % exc)
def _get_connect_params(self):
auth = self.auth_params
self.log("auth params:")
for key in auth:
self.log(" %s: %s" % (key, auth[key]))
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
            # TLS with no certs and no host verification
tls_config = self._get_tls_config(verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
else:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = self._get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
"Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
"You may also use TLS without verification by setting the tls parameter to true." \
% (self.auth_params['tls_hostname'], match.group(1), match.group(1))
self.fail(msg)
self.fail("SSL Exception: %s" % (error))
class EnvArgs(object):
def __init__(self):
self.config_file = None
self.docker_host = None
self.api_version = None
self.cert_path = None
self.ssl_version = None
self.tls = None
self.tls_verify = None
self.tls_hostname = None
self.timeout = None
self.default_ssh_port = None
self.default_ip = None
class DockerInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
self._env_args = self._parse_env_args()
self.groups = defaultdict(list)
self.hostvars = defaultdict(dict)
def run(self):
config_from_file = self._parse_config_file()
if not config_from_file:
config_from_file = dict()
docker_hosts = self.get_hosts(config_from_file)
for host in docker_hosts:
client = AnsibleDockerClient(host, self._args.debug)
self.get_inventory(client, host)
if not self._args.host:
self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
self.groups['_meta'] = dict(
hostvars=self.hostvars
)
print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
else:
print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))
sys.exit(0)
def get_inventory(self, client, host):
ssh_port = host.get('default_ssh_port')
default_ip = host.get('default_ip')
hostname = host.get('docker_host')
try:
containers = client.containers(all=True)
except Exception as exc:
self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
for container in containers:
id = container.get('Id')
short_id = id[:13]
try:
name = container.get('Names', list()).pop(0).lstrip('/')
except IndexError:
name = short_id
if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
try:
inspect = client.inspect_container(id)
except Exception as exc:
self.fail("Error inspecting container %s - %s" % (name, str(exc)))
running = inspect.get('State', dict()).get('Running')
# Add container to groups
image_name = inspect.get('Config', dict()).get('Image')
if image_name:
self.groups["image_%s" % (image_name)].append(name)
stack_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.stack.namespace')
if stack_name:
self.groups["stack_%s" % stack_name].append(name)
service_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.swarm.service.name')
if service_name:
self.groups["service_%s" % service_name].append(name)
self.groups[id].append(name)
self.groups[name].append(name)
if short_id not in self.groups:
self.groups[short_id].append(name)
self.groups[hostname].append(name)
if running is True:
self.groups['running'].append(name)
else:
self.groups['stopped'].append(name)
                # Figure out ssh IP and port
try:
                    # Look up the public-facing port NATed to the ssh port.
port = client.port(container, ssh_port)[0]
except (IndexError, AttributeError, TypeError):
port = dict()
try:
ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
except KeyError:
ip = ''
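                # Illustrative: client.port() typically returns a list like
                # [{'HostIp': '0.0.0.0', 'HostPort': '32768'}]; a '0.0.0.0'
                # HostIp is replaced with the configured default_ip above.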
facts = dict(
ansible_ssh_host=ip,
ansible_ssh_port=port.get('HostPort', int()),
docker_name=name,
docker_short_id=short_id
)
for key in inspect:
fact_key = self._slugify(key)
facts[fact_key] = inspect.get(key)
self.hostvars[name].update(facts)
def _slugify(self, value):
return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
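    # Example (illustrative): _slugify('NetworkSettings') returns
    # 'docker_networksettings', matching the docker_* hostvars shown above.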
def get_hosts(self, config):
'''
Determine the list of docker hosts we need to talk to.
:param config: dictionary read from config file. can be empty.
:return: list of connection dictionaries
'''
hosts = list()
hosts_list = config.get('hosts')
defaults = config.get('defaults', dict())
self.log('defaults:')
self.log(defaults, pretty_print=True)
def_host = defaults.get('host')
def_tls = defaults.get('tls')
def_tls_verify = defaults.get('tls_verify')
def_tls_hostname = defaults.get('tls_hostname')
def_ssl_version = defaults.get('ssl_version')
def_cert_path = defaults.get('cert_path')
def_cacert_path = defaults.get('cacert_path')
def_key_path = defaults.get('key_path')
def_version = defaults.get('version')
def_timeout = defaults.get('timeout')
def_ip = defaults.get('default_ip')
def_ssh_port = defaults.get('private_ssh_port')
if hosts_list:
# use hosts from config file
for host in hosts_list:
docker_host = host.get('host') or def_host or self._args.docker_host or \
self._env_args.docker_host or DEFAULT_DOCKER_HOST
api_version = host.get('version') or def_version or self._args.api_version or \
self._env_args.api_version or DEFAULT_DOCKER_API_VERSION
tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \
self._env_args.tls_hostname or DEFAULT_TLS_HOSTNAME
tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \
self._env_args.tls_verify or DEFAULT_TLS_VERIFY
tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \
self._env_args.ssl_version
cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \
self._env_args.cert_path
if cert_path and cert_path == self._env_args.cert_path:
cert_path = os.path.join(cert_path, 'cert.pem')
cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \
self._env_args.cert_path
if cacert_path and cacert_path == self._env_args.cert_path:
cacert_path = os.path.join(cacert_path, 'ca.pem')
key_path = host.get('key_path') or def_key_path or self._args.key_path or \
self._env_args.cert_path
if key_path and key_path == self._env_args.cert_path:
key_path = os.path.join(key_path, 'key.pem')
timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \
DEFAULT_TIMEOUT_SECONDS
default_ip = host.get('default_ip') or def_ip or self._env_args.default_ip or \
self._args.default_ip_address or DEFAULT_IP
default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \
DEFAULT_SSH_PORT
host_dict = dict(
docker_host=docker_host,
api_version=api_version,
tls=tls,
tls_verify=tls_verify,
tls_hostname=tls_hostname,
cert_path=cert_path,
cacert_path=cacert_path,
key_path=key_path,
ssl_version=ssl_version,
timeout=timeout,
default_ip=default_ip,
default_ssh_port=default_ssh_port,
)
hosts.append(host_dict)
else:
# use default definition
docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST
api_version = def_version or self._args.api_version or self._env_args.api_version or \
DEFAULT_DOCKER_API_VERSION
tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname or \
DEFAULT_TLS_HOSTNAME
tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY
tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version
cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
if cert_path and cert_path == self._env_args.cert_path:
cert_path = os.path.join(cert_path, 'cert.pem')
cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
if cacert_path and cacert_path == self._env_args.cert_path:
cacert_path = os.path.join(cacert_path, 'ca.pem')
key_path = def_key_path or self._args.key_path or self._env_args.cert_path
if key_path and key_path == self._env_args.cert_path:
key_path = os.path.join(key_path, 'key.pem')
timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
default_ip = def_ip or self._env_args.default_ip or self._args.default_ip_address or DEFAULT_IP
default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT
host_dict = dict(
docker_host=docker_host,
api_version=api_version,
tls=tls,
tls_verify=tls_verify,
tls_hostname=tls_hostname,
cert_path=cert_path,
cacert_path=cacert_path,
key_path=key_path,
ssl_version=ssl_version,
timeout=timeout,
default_ip=default_ip,
default_ssh_port=default_ssh_port,
)
hosts.append(host_dict)
self.log("hosts: ")
self.log(hosts, pretty_print=True)
return hosts
def _parse_config_file(self):
config = dict()
config_file = DEFAULT_DOCKER_CONFIG_FILE
if self._args.config_file:
config_file = self._args.config_file
elif self._env_args.config_file:
config_file = self._env_args.config_file
config_file = os.path.abspath(config_file)
if os.path.isfile(config_file):
with open(config_file) as f:
try:
config = yaml.safe_load(f.read())
except Exception as exc:
self.fail("Error: parsing %s - %s" % (config_file, str(exc)))
else:
msg = "Error: config file given by {} does not exist - " + config_file
if self._args.config_file:
self.fail(msg.format('command line argument'))
elif self._env_args.config_file:
self.fail(msg.format(DOCKER_ENV_ARGS.get('config_file')))
else:
self.log(msg.format('DEFAULT_DOCKER_CONFIG_FILE'))
return config
def log(self, msg, pretty_print=False):
if self._args.debug:
log(msg, pretty_print)
def fail(self, msg):
fail(msg)
def _parse_env_args(self):
args = EnvArgs()
for key, value in DOCKER_ENV_ARGS.items():
if os.environ.get(value):
val = os.environ.get(value)
if val in BOOLEANS_TRUE:
val = True
if val in BOOLEANS_FALSE:
val = False
setattr(args, key, val)
return args
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Return Ansible inventory for one or more Docker hosts.')
parser.add_argument('--list', action='store_true', default=True,
help='List all containers (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Only get information for a specific container.')
parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty print JSON output (default: False)')
parser.add_argument('--config-file', action='store', default=None,
help="Name of the config file to use. Default is %s" % (DEFAULT_DOCKER_CONFIG_FILE))
parser.add_argument('--docker-host', action='store', default=None,
help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
% (DEFAULT_DOCKER_HOST))
parser.add_argument('--tls-hostname', action='store', default=None,
help="Host name to expect in TLS certs. Defaults to %s" % DEFAULT_TLS_HOSTNAME)
parser.add_argument('--api-version', action='store', default=None,
help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
parser.add_argument('--timeout', action='store', default=None,
help="Docker connection timeout in seconds. Defaults to %s"
% (DEFAULT_TIMEOUT_SECONDS))
parser.add_argument('--cacert-path', action='store', default=None,
help="Path to the TLS certificate authority pem file.")
parser.add_argument('--cert-path', action='store', default=None,
help="Path to the TLS certificate pem file.")
parser.add_argument('--key-path', action='store', default=None,
help="Path to the TLS encryption key pem file.")
parser.add_argument('--ssl-version', action='store', default=None,
help="TLS version number")
parser.add_argument('--tls', action='store_true', default=None,
help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
parser.add_argument('--tls-verify', action='store_true', default=None,
help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
parser.add_argument('--private-ssh-port', action='store', default=None,
help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
parser.add_argument('--default-ip-address', action='store', default=None,
help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
return parser.parse_args()
def _json_format_dict(self, data, pretty_print=False):
# format inventory data for output
if pretty_print:
return json.dumps(data, sort_keys=True, indent=4)
else:
return json.dumps(data)
def main():
if not HAS_DOCKER_PY:
fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
DockerInventory().run()
main()

View file

@ -1,74 +0,0 @@
# This is the configuration file for the Docker inventory script: docker.py.
#
# You can define the following in this file:
#
# defaults
# Defines a default connection. Defaults will be taken from this and applied to any values not provided
# for a host defined in the hosts list.
#
# hosts
# If you wish to get inventory from more than one Docker host, define a hosts list.
#
# For the default host and each host in the hosts list define the following attributes:
#
# host:
# description: The URL or Unix socket path used to connect to the Docker API.
# required: yes
#
# tls:
# description: Connect using TLS without verifying the authenticity of the Docker host server.
# default: false
# required: false
#
# tls_verify:
# description: Connect using TLS and verify the authenticity of the Docker host server.
# default: false
# required: false
#
# cert_path:
# description: Path to the client's TLS certificate file.
# default: null
# required: false
#
# cacert_path:
# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
# default: null
# required: false
#
# key_path:
# description: Path to the client's TLS key file.
# default: null
# required: false
#
# version:
# description: The Docker API version.
# required: false
# default: will be supplied by the docker-py module.
#
# timeout:
# description: The amount of time in seconds to wait on an API response.
# required: false
# default: 60
#
# default_ip:
# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
# '0.0.0.0'.
# required: false
# default: 127.0.0.1
#
# private_ssh_port:
# description: The port containers use for SSH
# required: false
# default: 22
#defaults:
# host: unix:///var/run/docker.sock
# private_ssh_port: 22
# default_ip: 127.0.0.1
#hosts:
# - host: tcp://10.45.5.16:4243
# private_ssh_port: 2022
# default_ip: 172.16.3.45
# - host: tcp://localhost:4243
# private_ssh_port: 2029

View file

@ -1,99 +0,0 @@
#!/usr/bin/env python
"""
fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
returns them under the host group 'coreos'
"""
# Copyright (C) 2014 Andrew Rothstein <andrew.rothstein at gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Thanks to the vagrant.py inventory script for giving me the basic structure
# of this.
#
import sys
import subprocess
import re
from optparse import OptionParser
import json
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers in your fleet")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
def get_ssh_config():
configs = []
for box in list_running_boxes():
config = get_a_ssh_config(box)
configs.append(config)
return configs
# list all the running instances in the fleet
def list_running_boxes():
boxes = []
    for line in subprocess.check_output(["fleetctl", "list-machines"]).decode('utf-8').split('\n'):
matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line)
if matcher and matcher.group(1) != "IP":
boxes.append(matcher.group(1))
return boxes
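# Illustrative `fleetctl list-machines` output parsed above:
#   MACHINE         IP            METADATA
#   76ffb3a8...     10.0.0.101    -
# The regex captures the second column (the IP); the header row is skipped
# because its captured value is the literal string "IP".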
def get_a_ssh_config(box_name):
config = {}
config['Host'] = box_name
config['ansible_ssh_user'] = 'core'
config['ansible_python_interpreter'] = '/opt/bin/python'
return config
# List out servers that fleet has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
hosts = {'coreos': []}
for data in ssh_config:
hosts['coreos'].append(data['Host'])
print(json.dumps(hosts))
    sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
result = {}
ssh_config = get_ssh_config()
    details = list(filter(lambda x: (x['Host'] == options.host), ssh_config))
    if len(details) > 0:
# pass through the port, in case it's non standard.
result = details[0]
print(json.dumps(result))
    sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(1)

View file

@ -1,200 +0,0 @@
# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory)
#
# This script can be used as an Ansible dynamic inventory.
# The connection parameters are set up via *foreman.ini*
# This is how the script finds the configuration file, in
# order of discovery:
#
# * `/etc/ansible/foreman.ini`
# * Current directory of your inventory script.
# * `FOREMAN_INI_PATH` environment variable.
#
# ## Variables and Parameters
#
# The data returned from Foreman for each host is stored in a foreman
# hash, so it is available as *host_vars* along with the parameters
# of the host and its hostgroups:
#
# "foo.example.com": {
# "foreman": {
# "architecture_id": 1,
# "architecture_name": "x86_64",
# "build": false,
# "build_status": 0,
# "build_status_label": "Installed",
# "capabilities": [
# "build",
# "image"
# ],
# "compute_profile_id": 4,
# "hostgroup_name": "webtier/myapp",
# "id": 70,
# "image_name": "debian8.1",
# ...
# "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77"
# },
# "foreman_params": {
# "testparam1": "foobar",
# "testparam2": "small",
# ...
# }
#
# and could therefore be used in Ansible like:
#
# - debug: msg="From Foreman host {{ foreman['uuid'] }}"
#
# Which yields
#
# TASK [test_foreman : debug] ****************************************************
# ok: [foo.example.com] => {
# "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf"
# }
#
# ## Automatic Ansible groups
#
# The inventory will provide a set of groups, by default prefixed by
# 'foreman_'. If you want to customize this prefix, change the
# group_prefix option in /etc/ansible/foreman.ini. The rest of this
# guide will assume the default prefix of 'foreman'
#
# The hostgroup, location, organization, content view, and lifecycle
# environment of each host are created as Ansible groups with a
# foreman_<grouptype> prefix, all lowercase, with problematic characters
# removed. So e.g. the Foreman hostgroup
#
# myapp / webtier / datacenter1
#
# would turn into the Ansible group:
#
# foreman_hostgroup_myapp_webtier_datacenter1
#
# If the parameter want_hostcollections is set to true, the
# collections each host is in are created as Ansible groups with a
# foreman_hostcollection prefix, all lowercase, with problematic
# characters removed. So e.g. the Foreman host collection
#
# Patch Window Thursday
#
# would turn into the Ansible group:
#
# foreman_hostcollection_patchwindowthursday
#
# If the parameter host_filters is set, it will be used as the
# "search" parameter for the /api/v2/hosts call. This can be used to
# restrict the list of returned hosts, as shown below.
#
# Furthermore Ansible groups can be created on the fly using the
# *group_patterns* variable in *foreman.ini* so that you can build up
# hierarchies using parameters on the hostgroup and host variables.
#
# Lets assume you have a host that is built using this nested hostgroup:
#
# myapp / webtier / datacenter1
#
# and each of the hostgroups defines a parameter, respectively:
#
# myapp: app_param = myapp
# webtier: tier_param = webtier
# datacenter1: dc_param = datacenter1
#
# If the host is also in a subnet called "mysubnet" and provisioned via an image,
# then *group_patterns* like:
#
# [ansible]
# group_patterns = ["{app_param}-{tier_param}-{dc_param}",
# "{app_param}-{tier_param}",
# "{app_param}",
# "{subnet_name}-{provision_method}"]
#
# would put the host into the additional Ansible groups:
#
# - myapp-webtier-datacenter1
# - myapp-webtier
# - myapp
# - mysubnet-image
#
# by recursively resolving the hostgroups, getting the parameter keys
# and values and doing a Python *string.format()* like replacement on
# it.
#
[foreman]
url = http://localhost:3000/
user = foreman
password = secret
ssl_verify = True
# Foreman 1.24 introduces a new reports API to improve performance of the inventory script.
# Note: This requires foreman_ansible plugin installed.
# Set to False if you want to use the old API. Defaults to True.
use_reports_api = True
# Retrieve only hosts from the organization "Web Engineering".
# host_filters = organization="Web Engineering"
# Retrieve only hosts from the organization "Web Engineering" that are
# also in the host collection "Apache Servers".
# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
# Foreman Inventory report related configuration options.
# Configs that default to True :
# want_organization , want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts
# Configs that default to False :
# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params
[report]
# want_organization = True
# want_location = True
# want_ipv4 = True
# want_ipv6 = False
# want_host_group = True
# want_subnet = True
# want_subnet_v6 = False
# want_smart_proxies = True
# want_content_facet_attributes = False
# want_host_params = False
# use this config to determine if facts are to be fetched in the report and stored on the hosts.
# want_facts = False
# Upon receiving a request to return an inventory report, Foreman schedules a report generation job.
# The script then polls the report_data endpoint repeatedly to check whether the job is complete, and retrieves the data.
# poll_interval defines the interval in seconds between two consecutive calls to the report_data endpoint while polling.
# Defaults to 10 seconds.
# poll_interval = 10
[ansible]
group_patterns = ["{app}-{tier}-{color}",
"{app}-{color}",
"{app}",
"{tier}"]
group_prefix = foreman_
# Whether to fetch facts from Foreman and store them on the host
want_facts = True
# Whether to create Ansible groups for host collections. Only tested
# with Katello (Red Hat Satellite). Disabled by default to not break
# the script for stand-alone Foreman.
want_hostcollections = False
# Whether to interpret global parameter values as JSON (if possible, else
# take them as is). Only tested with Katello (Red Hat Satellite).
# This allows defining list and dictionary (and more complicated) variables
# by entering them as JSON strings in Foreman parameters.
# Disabled by default, as the change would otherwise not be backward compatible.
rich_params = False
# Whether to populate the ansible_ssh_host variable to explicitly specify the
# connection target. Only tested with Katello (Red Hat Satellite).
# If the foreman 'ip' fact exists then the ansible_ssh_host variable is populated
# to permit connections where DNS resolution fails.
want_ansible_ssh_host = False
[cache]
path = .
max_age = 60
# Whether to scan foreman to add recently created hosts in inventory cache
scan_new_hosts = True

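To make the group_patterns mechanism above concrete, here is a small standalone sketch, not part of the script: each pattern is str.format()-ed against the host's merged parameters, and patterns referencing a parameter the host lacks are silently skipped (in this sample the host has no subnet_name/provision_method, so the last pattern drops out):

params = {'app_param': 'myapp', 'tier_param': 'webtier', 'dc_param': 'datacenter1'}
patterns = ["{app_param}-{tier_param}-{dc_param}",
            "{app_param}-{tier_param}",
            "{app_param}",
            "{subnet_name}-{provision_method}"]

groups = []
for pattern in patterns:
    try:
        groups.append(pattern.format(**params))
    except KeyError:
        pass  # this host lacks the parameter, so the pattern is skipped
print(groups)  # ['myapp-webtier-datacenter1', 'myapp-webtier', 'myapp']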
View file

@ -1,651 +0,0 @@
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>,
# Daniel Lobato Garcia <dlobatog@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This is somewhat based on cobbler inventory
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import argparse
import copy
import os
import re
import sys
from time import time, sleep
from collections import defaultdict
from distutils.version import LooseVersion
# 3rd party imports
import requests
if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
print('This script requires python-requests 1.1 as a minimum version')
sys.exit(1)
from requests.auth import HTTPBasicAuth
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import configparser as ConfigParser
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
class ForemanInventory(object):
def __init__(self):
self.inventory = defaultdict(list) # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
self.params = dict() # Params of each host
self.facts = dict() # Facts of each host
self.hostgroups = dict() # host groups
self.hostcollections = dict() # host collections
self.session = None # Requests session
self.config_paths = [
"/etc/ansible/foreman.ini",
os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini',
]
env_value = os.environ.get('FOREMAN_INI_PATH')
if env_value is not None:
self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
def read_settings(self):
"""Reads the settings from the foreman.ini file"""
config = ConfigParser.SafeConfigParser()
config.read(self.config_paths)
# Foreman API related
try:
self.foreman_url = config.get('foreman', 'url')
self.foreman_user = config.get('foreman', 'user')
self.foreman_pw = config.get('foreman', 'password', raw=True)
self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
print("Error parsing configuration: %s" % e, file=sys.stderr)
return False
# Inventory Report Related
try:
self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.foreman_use_reports_api = True
try:
self.want_organization = config.getboolean('report', 'want_organization')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_organization = True
try:
self.want_location = config.getboolean('report', 'want_location')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_location = True
try:
self.want_IPv4 = config.getboolean('report', 'want_ipv4')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_IPv4 = True
try:
self.want_IPv6 = config.getboolean('report', 'want_ipv6')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_IPv6 = False
try:
self.want_host_group = config.getboolean('report', 'want_host_group')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_host_group = True
try:
self.want_host_params = config.getboolean('report', 'want_host_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_host_params = False
try:
self.want_subnet = config.getboolean('report', 'want_subnet')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_subnet = True
try:
self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_subnet_v6 = False
try:
self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_smart_proxies = True
try:
self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_content_facet_attributes = False
try:
self.report_want_facts = config.getboolean('report', 'want_facts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.report_want_facts = True
try:
self.poll_interval = config.getint('report', 'poll_interval')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.poll_interval = 10
# Ansible related
try:
group_patterns = config.get('ansible', 'group_patterns')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
group_patterns = "[]"
self.group_patterns = json.loads(group_patterns)
try:
self.group_prefix = config.get('ansible', 'group_prefix')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.group_prefix = "foreman_"
try:
self.want_facts = config.getboolean('ansible', 'want_facts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_facts = True
self.want_facts = self.want_facts and self.report_want_facts
try:
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_hostcollections = False
try:
self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_ansible_ssh_host = False
# Do we want parameters to be interpreted if possible as JSON? (no by default)
try:
self.rich_params = config.getboolean('ansible', 'rich_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.rich_params = False
try:
self.host_filters = config.get('foreman', 'host_filters')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.host_filters = None
# Cache related
try:
cache_path = os.path.expanduser(config.get('cache', 'path'))
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
cache_path = '.'
(script, ext) = os.path.splitext(os.path.basename(__file__))
self.cache_path_cache = cache_path + "/%s.cache" % script
self.cache_path_inventory = cache_path + "/%s.index" % script
self.cache_path_params = cache_path + "/%s.params" % script
self.cache_path_facts = cache_path + "/%s.facts" % script
self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
try:
self.cache_max_age = config.getint('cache', 'max_age')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.cache_max_age = 60
try:
self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.scan_new_hosts = False
return True
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
self.args = parser.parse_args()
def _get_session(self):
if not self.session:
self.session = requests.session()
self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
self.session.verify = self.foreman_ssl_verify
return self.session
def _get_json(self, url, ignore_errors=None, params=None):
if params is None:
params = {}
params['per_page'] = 250
page = 1
results = []
s = self._get_session()
while True:
params['page'] = page
ret = s.get(url, params=params)
if ignore_errors and ret.status_code in ignore_errors:
break
ret.raise_for_status()
json = ret.json()
# /hosts/:id has no results key
if 'results' not in json:
return json
# Facts are returned as dict in results not list
if isinstance(json['results'], dict):
return json['results']
# The list of all hosts is returned paginated
results = results + json['results']
if len(results) >= json['subtotal']:
break
page += 1
if len(json['results']) == 0:
print("Did not make any progress during loop. "
"expected %d got %d" % (json['total'], len(results)),
file=sys.stderr)
break
return results
def _use_inventory_report(self):
if not self.foreman_use_reports_api:
return False
status_url = "%s/api/v2/status" % self.foreman_url
result = self._get_json(status_url)
foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0'))
return foreman_version
def _fetch_params(self):
options, params = ("no", "yes"), dict()
params["Organization"] = options[self.want_organization]
params["Location"] = options[self.want_location]
params["IPv4"] = options[self.want_IPv4]
params["IPv6"] = options[self.want_IPv6]
params["Facts"] = options[self.want_facts]
params["Host Group"] = options[self.want_host_group]
params["Host Collections"] = options[self.want_hostcollections]
params["Subnet"] = options[self.want_subnet]
params["Subnet v6"] = options[self.want_subnet_v6]
params["Smart Proxies"] = options[self.want_smart_proxies]
params["Content Attributes"] = options[self.want_content_facet_attributes]
params["Host Parameters"] = options[self.want_host_params]
if self.host_filters:
params["Hosts"] = self.host_filters
return params
def _post_request(self):
url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url
session = self._get_session()
params = {'input_values': self._fetch_params()}
ret = session.post(url, json=params)
if not ret:
raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!")
url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url'))
response = session.get(url)
while response:
if response.status_code != 204:
break
else:
sleep(self.poll_interval)
response = session.get(url)
if not response:
raise Exception("Error receiving inventory report from foreman. Please check foreman logs!")
else:
return response.json()
def _get_hosts(self):
url = "%s/api/v2/hosts" % self.foreman_url
params = {}
if self.host_filters:
params['search'] = self.host_filters
return self._get_json(url, params=params)
def _get_host_data_by_id(self, hid):
url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
return self._get_json(url)
def _get_facts_by_id(self, hid):
url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
return self._get_json(url)
def _resolve_params(self, host_params):
"""Convert host params to dict"""
params = {}
for param in host_params:
name = param['name']
if self.rich_params:
try:
params[name] = json.loads(param['value'])
except ValueError:
params[name] = param['value']
else:
params[name] = param['value']
return params
def _get_facts(self, host):
"""Fetch all host facts of the host"""
if not self.want_facts:
return {}
ret = self._get_facts_by_id(host['id'])
if len(ret.values()) == 0:
facts = {}
elif len(ret.values()) == 1:
facts = list(ret.values())[0]
else:
raise ValueError("More than one set of facts returned for '%s'" % host)
return facts
def write_to_cache(self, data, filename):
"""Write data in JSON format to a file"""
json_data = json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def _write_cache(self):
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
self.write_to_cache(self.params, self.cache_path_params)
self.write_to_cache(self.facts, self.cache_path_facts)
self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores
so they can be used as Ansible groups
>>> ForemanInventory.to_safe("foo-bar baz")
'foo_barbaz'
'''
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
def update_cache(self, scan_only_new_hosts=False):
"""Make calls to foreman and save the output in a cache"""
use_inventory_report = self._use_inventory_report()
if use_inventory_report:
self._update_cache_inventory(scan_only_new_hosts)
else:
self._update_cache_host_api(scan_only_new_hosts)
def _update_cache_inventory(self, scan_only_new_hosts):
self.groups = dict()
self.hosts = dict()
try:
inventory_report_response = self._post_request()
except Exception:
self._update_cache_host_api(scan_only_new_hosts)
return
host_data = json.loads(inventory_report_response)
for host in host_data:
if not host or (host["name"] in self.cache.keys() and scan_only_new_hosts):
continue
dns_name = host['name']
host_params = host.pop('host_parameters', {})
fact_list = host.pop('facts', {})
content_facet_attributes = host.get('content_attributes', {}) or {}
# Create ansible groups for hostgroup
group = 'host_group'
val = host.get(group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
# Create ansible groups for environment, location and organization
for group in ['environment', 'location', 'organization']:
val = host.get('%s' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
for group in ['lifecycle_environment', 'content_view']:
val = content_facet_attributes.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
params = host_params
# Ansible groups by parameters in host groups and Foreman host
# attributes.
groupby = dict()
for k, v in params.items():
groupby[k] = self.to_safe(to_text(v))
# The name of the ansible groups is given by group_patterns:
for pattern in self.group_patterns:
try:
key = pattern.format(**groupby)
self.inventory[key].append(dns_name)
except KeyError:
pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host
self.params[dns_name] = params
self.facts[dns_name] = fact_list
self.inventory['all'].append(dns_name)
self._write_cache()
def _update_cache_host_api(self, scan_only_new_hosts):
"""Make calls to foreman and save the output in a cache"""
self.groups = dict()
self.hosts = dict()
for host in self._get_hosts():
if host['name'] in self.cache.keys() and scan_only_new_hosts:
continue
dns_name = host['name']
host_data = self._get_host_data_by_id(host['id'])
host_params = host_data.get('all_parameters', {})
# Create ansible groups for hostgroup
group = 'hostgroup'
val = host.get('%s_title' % group) or host.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
# Create ansible groups for environment, location and organization
for group in ['environment', 'location', 'organization']:
val = host.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
for group in ['lifecycle_environment', 'content_view']:
val = host.get('content_facet_attributes', {}).get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
params = self._resolve_params(host_params)
# Ansible groups by parameters in host groups and Foreman host
# attributes.
groupby = dict()
for k, v in params.items():
groupby[k] = self.to_safe(to_text(v))
# The name of the ansible groups is given by group_patterns:
for pattern in self.group_patterns:
try:
key = pattern.format(**groupby)
self.inventory[key].append(dns_name)
except KeyError:
pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host_data.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host
self.params[dns_name] = params
self.facts[dns_name] = self._get_facts(host)
self.inventory['all'].append(dns_name)
self._write_cache()
def is_cache_valid(self):
"""Determines if the cache is still valid"""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if (os.path.isfile(self.cache_path_inventory) and
os.path.isfile(self.cache_path_params) and
os.path.isfile(self.cache_path_facts)):
return True
return False
def load_inventory_from_cache(self):
"""Read the index from the cache file sets self.index"""
with open(self.cache_path_inventory, 'r') as fp:
self.inventory = json.load(fp)
def load_params_from_cache(self):
"""Read the index from the cache file sets self.index"""
with open(self.cache_path_params, 'r') as fp:
self.params = json.load(fp)
def load_facts_from_cache(self):
"""Read the index from the cache file sets self.facts"""
if not self.want_facts:
return
with open(self.cache_path_facts, 'r') as fp:
self.facts = json.load(fp)
def load_hostcollections_from_cache(self):
"""Read the index from the cache file sets self.hostcollections"""
if not self.want_hostcollections:
return
with open(self.cache_path_hostcollections, 'r') as fp:
self.hostcollections = json.load(fp)
def load_cache_from_cache(self):
"""Read the cache from the cache file sets self.cache"""
with open(self.cache_path_cache, 'r') as fp:
self.cache = json.load(fp)
def get_inventory(self):
if self.args.refresh_cache or not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_params_from_cache()
self.load_facts_from_cache()
self.load_hostcollections_from_cache()
self.load_cache_from_cache()
if self.scan_new_hosts:
self.update_cache(True)
def get_host_info(self):
"""Get variables about a specific host"""
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return json_format_dict({}, True)
return json_format_dict(self.cache[self.args.host], True)
def _print_data(self):
data_to_print = ""
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {
'foreman': self.cache[hostname],
'foreman_params': self.params[hostname],
}
if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]:
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip']
if self.want_facts:
self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname]
data_to_print += json_format_dict(self.inventory, True)
print(data_to_print)
def run(self):
# Read settings and parse CLI arguments
if not self.read_settings():
return False
self.parse_cli_args()
self.get_inventory()
self._print_data()
return True
if __name__ == '__main__':
sys.exit(not ForemanInventory().run())

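For orientation, a hedged sketch of how the group keys built above come out in practice; the hostgroup title is the example from the config file comments, and the helper is adapted from the script's to_safe method:

import re

def to_safe(word):
    """Convert 'bad' characters in a string to underscores (as in the script)."""
    return re.sub(r"[^A-Za-z0-9\_]", "_", word.replace(" ", ""))

print(to_safe('%s%s_%s' % ('foreman_', 'hostgroup', 'myapp / webtier / datacenter1'.lower())))
# -> foreman_hostgroup_myapp_webtier_datacenter1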
View file

@ -1,126 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
from distutils.version import LooseVersion
import json
import os
import sys
from ipalib import api, errors, __version__ as IPA_VERSION
from ansible.module_utils.six import u
def initialize():
'''
This function initializes the FreeIPA/IPA API. This function requires
no arguments. A Kerberos key must be present in the user's keyring in
order for this to work. The default IPA configuration directory is /etc/ipa;
this path can be overridden with the IPA_CONFDIR environment variable.
'''
api.bootstrap(context='cli')
if not os.path.isdir(api.env.confdir):
print("WARNING: IPA configuration directory (%s) is missing. "
"Environment variable IPA_CONFDIR could be used to override "
"default path." % api.env.confdir)
if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'):
# With ipalib < 4.6.0 'server' and 'domain' have default values
# ('localhost:8888', 'example.com'); newer versions don't, and when
# DNS autodiscovery is broken, one of jsonrpc_uri / xmlrpc_uri is
# required.
# ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132)
# that's why 4.6.2 is explicitly tested.
if 'server' not in api.env or 'domain' not in api.env:
sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not "
"defined in '[global]' section of '%s' nor in '%s'." %
(api.env.conf, api.env.conf_default))
api.finalize()
try:
api.Backend.rpcclient.connect()
except AttributeError:
# FreeIPA < 4.0 compatibility
api.Backend.xmlclient.connect()
return api
def list_groups(api):
'''
This function prints a list of all host groups. This function requires
one argument, the FreeIPA/IPA API object.
'''
inventory = {}
hostvars = {}
result = api.Command.hostgroup_find(all=True)['result']
for hostgroup in result:
# Get direct and indirect members (nested hostgroups) of hostgroup
members = []
if 'member_host' in hostgroup:
members = list(hostgroup['member_host'])
if 'memberindirect_host' in hostgroup:
members += (host for host in hostgroup['memberindirect_host'])
inventory[hostgroup['cn'][0]] = {'hosts': list(members)}
for member in members:
hostvars[member] = {}
inventory['_meta'] = {'hostvars': hostvars}
inv_string = json.dumps(inventory, indent=1, sort_keys=True)
print(inv_string)
return None
def parse_args():
'''
This function parses the arguments that were passed in via the command line.
This function expects no arguments.
'''
parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specified host')
return parser.parse_args()
def get_host_attributes(api, host):
"""
This function expects one string, the hostname to look up variables for.
Args:
api: FreeIPA API object
host: name of the host
Returns: Dict of Host vars if found else None
"""
try:
result = api.Command.host_show(u(host))['result']
if 'usercertificate' in result:
del result['usercertificate']
return json.dumps(result, indent=1)
except errors.NotFound:
return json.dumps({}, indent=1)
if __name__ == '__main__':
args = parse_args()
api = initialize()
if args.host:
print(get_host_attributes(api, args.host))
elif args.list:
list_groups(api)

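For reference, a minimal illustration (hostgroup and host names invented) of the JSON shape that list_groups() prints:

import json

inventory = {
    'webservers': {'hosts': ['web1.example.com', 'web2.example.com']},
    'databases': {'hosts': ['db1.example.com']},
    '_meta': {'hostvars': {'web1.example.com': {},
                           'web2.example.com': {},
                           'db1.example.com': {}}},
}
print(json.dumps(inventory, indent=1, sort_keys=True))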
View file

@ -1,129 +0,0 @@
#!/usr/bin/env python
#
# (c) 2018, Red Hat, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import json
import argparse
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils.six import iteritems, raise_from
from ansible.module_utils._text import to_text
try:
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiInventory
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
except ImportError as exc:
try:
# Fallback for Ansible 2.9
from ansible.module_utils.net_tools.nios.api import WapiInventory
from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
except ImportError:
raise_from(
Exception(
'This inventory plugin only works with Ansible 2.9, 2.10, or 3, or when community.general is installed correctly in PYTHONPATH.'
' Try using the inventory plugin from infoblox.nios_modules instead.'),
exc)
CONFIG_FILES = [
os.environ.get('INFOBLOX_CONFIG_FILE', ''),
'/etc/ansible/infoblox.yaml',
'/etc/ansible/infoblox.yml'
]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true',
help='List host records from NIOS for use in Ansible')
parser.add_argument('--host',
help='List meta data about single host (not used)')
return parser.parse_args()
def main():
args = parse_args()
for config_file in CONFIG_FILES:
if os.path.exists(config_file):
break
else:
sys.stderr.write('unable to locate config file at /etc/ansible/infoblox.yaml\n')
sys.exit(-1)
try:
loader = DataLoader()
config = loader.load_from_file(config_file)
provider = config.get('provider') or {}
wapi = WapiInventory(provider)
except Exception as exc:
sys.stderr.write(to_text(exc))
sys.exit(-1)
if args.host:
host_filter = {'name': args.host}
else:
host_filter = {}
config_filters = config.get('filters') or {}
if config_filters.get('view') is not None:
host_filter['view'] = config_filters['view']
if config_filters.get('extattrs'):
extattrs = normalize_extattrs(config_filters['extattrs'])
else:
extattrs = {}
hostvars = {}
inventory = {
'_meta': {
'hostvars': hostvars
}
}
return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
hosts = wapi.get_object('record:host',
host_filter,
extattrs=extattrs,
return_fields=return_fields)
if hosts:
for item in hosts:
view = item['view']
name = item['name']
if view not in inventory:
inventory[view] = {'hosts': []}
inventory[view]['hosts'].append(name)
hostvars[name] = {
'view': view
}
if item.get('extattrs'):
for key, value in iteritems(flatten_extattrs(item['extattrs'])):
if key.startswith('ansible_'):
hostvars[name][key] = value
else:
if 'extattrs' not in hostvars[name]:
hostvars[name]['extattrs'] = {}
hostvars[name]['extattrs'][key] = value
sys.stdout.write(json.dumps(inventory, indent=4))
sys.exit(0)
if __name__ == '__main__':
main()

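A simplified, self-contained sketch of the record loop above; the host record is invented, and extensible attributes are flattened to their 'value' field here as a stand-in assumption for flatten_extattrs:

import json

hosts = [{'name': 'web1.example.com', 'view': 'default',
          'extattrs': {'Site': {'value': 'paris'},
                       'ansible_user': {'value': 'admin'}}}]

hostvars = {}
inventory = {'_meta': {'hostvars': hostvars}}
for item in hosts:
    view, name = item['view'], item['name']
    inventory.setdefault(view, {'hosts': []})['hosts'].append(name)
    hostvars[name] = {'view': view}
    for key, attr in item['extattrs'].items():
        # ansible_* attributes become hostvars; the rest land under 'extattrs'
        if key.startswith('ansible_'):
            hostvars[name][key] = attr['value']
        else:
            hostvars[name].setdefault('extattrs', {})[key] = attr['value']
print(json.dumps(inventory, indent=4))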
View file

@ -1,24 +0,0 @@
---
# This file provides the configuration information for the Infoblox dynamic
# inventory script that is used to dynamically pull host information from NIOS.
# This file should be copied to /etc/ansible/infoblox.yaml in order for the
# dynamic script to find it.
# Sets the provider arguments for authenticating to the Infoblox server to
# retrieve inventory hosts. Provider arguments can also be set using
# environment variables. Supported environment variables all start with
# INFOBLOX_{{ name }}. For instance, to set the host provider value, the
# environment variable would be INFOBLOX_HOST.
provider:
host: <SERVER_IP>
username: <USERNAME>
password: <PASSWORD>
# Filters allow the dynamic inventory script to restrict the set of hosts that
# are returned from the Infoblox server.
filters:
# restrict returned hosts by extensible attributes
extattrs: {}
# restrict returned hosts to a specified DNS view
view: null

View file

@ -1,27 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'jail'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")
sys.exit(1)

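To show the expected --list shape, a standalone sketch with canned `jls -q name` output (the jail names are invented):

import json

fake_stdout = ["www\n", "db\n"]  # stand-in for `jls -q name` output lines
hosts = [line[:-1] for line in fake_stdout]
print(json.dumps({'all': {'hosts': hosts,
                          'vars': {'ansible_connection': 'jail'}}}))
# -> {"all": {"hosts": ["www", "db"], "vars": {"ansible_connection": "jail"}}}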
View file

@ -1,117 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Dynamic inventory script which lets you use nodes discovered by Canonical's
# Landscape (http://www.ubuntu.com/management/landscape-features).
#
# Requires the `landscape_api` Python module
# See:
# - https://landscape.canonical.com/static/doc/api/api-client-package.html
# - https://landscape.canonical.com/static/doc/api/python-api.html
#
# Environment variables
# ---------------------
# - `LANDSCAPE_API_URI`
# - `LANDSCAPE_API_KEY`
# - `LANDSCAPE_API_SECRET`
# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
import argparse
import collections
import os
import sys
from landscape_api.base import API
import json
_key = 'landscape'
class EnvironmentConfig(object):
uri = os.getenv('LANDSCAPE_API_URI')
access_key = os.getenv('LANDSCAPE_API_KEY')
secret_key = os.getenv('LANDSCAPE_API_SECRET')
ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
def _landscape_client():
env = EnvironmentConfig()
return API(
uri=env.uri,
access_key=env.access_key,
secret_key=env.secret_key,
ssl_ca_file=env.ssl_ca_file)
def get_landscape_members_data():
return _landscape_client().get_computers()
def get_nodes(data):
return [node['hostname'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for value in node['tags']:
groups[value].append(node['hostname'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['hostname']] = {'tags': node['tags']}
return meta
def print_list():
data = get_landscape_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_landscape_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from landscape cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from landscape cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])

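A minimal sketch (computer data invented) of how Landscape tags are folded into groups by get_groups() and merged into the --list payload:

import collections
import json

data = [{'hostname': 'app1.example.com', 'tags': ['web', 'staging']},
        {'hostname': 'db1.example.com', 'tags': ['db']}]

groups = collections.defaultdict(list)
for node in data:
    for tag in node['tags']:
        groups[tag].append(node['hostname'])

inventory = {'landscape': [n['hostname'] for n in data],
             '_meta': {'hostvars': {n['hostname']: {'tags': n['tags']}
                                    for n in data}}}
inventory.update(groups)
print(json.dumps(inventory))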
View file

@ -1,15 +0,0 @@
# Ansible Apache Libcloud Generic inventory script
[driver]
provider = CLOUDSTACK
host =
path =
secure = True
verify_ssl_cert = True
key =
secret =
[cache]
cache_path=/path/to/your/cache
cache_max_age=60

View file

@ -1,18 +0,0 @@
# Ansible Linode external inventory script settings
#
[linode]
# API calls to Linode are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-Linode.cache
# - ansible-Linode.index
cache_path = /tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 300
# If set to true, use the host's public IP in the dictionary instead of the label
use_public_ip = false

View file

@ -1,338 +0,0 @@
#!/usr/bin/env python
'''
Linode external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
Linode using the Chube library.
NOTE: This script assumes Ansible is being executed where Chube is already
installed and has a valid config at ~/.chube. If not, run:
pip install chube
echo -e "---\napi_key: <YOUR API KEY GOES HERE>" > ~/.chube
For more details, see: https://github.com/exosite/chube
NOTE: By default, this script also assumes that the Linodes in your account all have
labels that correspond to hostnames that are in your resolver search path
(configured in /etc/resolv.conf, with static entries in /etc/hosts).
Optionally, if you would like to use the host's public IP instead of its label, use
the following setting in linode.ini:
use_public_ip = true
When run against a specific host, this script returns the following variables:
- api_id
- datacenter_id
- datacenter_city (lowercase city name of data center, e.g. 'tokyo')
- label
- display_group
- create_dt
- total_hd
- total_xfer
- total_ram
- status
- public_ip (The first public IP found)
- private_ip (The first private IP found, or empty string if none)
- alert_cpu_enabled
- alert_cpu_threshold
- alert_diskio_enabled
- alert_diskio_threshold
- alert_bwin_enabled
- alert_bwin_threshold
- alert_bwout_enabled
- alert_bwout_threshold
- alert_bwquota_enabled
- alert_bwquota_threshold
- backup_weekly_daily
- backup_window
- watchdog
Peter Sankauskas did most of the legwork here with his EC2 plugin; this
was just adapted for Linode.
'''
# (c) 2013, Dan Slimmon
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
# Standard imports
import os
import re
import sys
import argparse
from time import time
import json
try:
from chube import load_chube_config
from chube import api as chube_api
from chube.datacenter import Datacenter
from chube.linode_obj import Linode
except Exception:
try:
# remove local paths and other stuff that may
# cause an import conflict, as chube is sensitive
# to name collisions on importing
old_path = sys.path
sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))]
from chube import load_chube_config
from chube import api as chube_api
from chube.datacenter import Datacenter
from chube.linode_obj import Linode
sys.path = old_path
except Exception as e:
raise Exception("could not import chube")
load_chube_config()
# Imports for ansible
from ansible.module_utils.six.moves import configparser as ConfigParser
class LinodeInventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
"""Main execution path."""
# Inventory grouped by display group
self.inventory = self._empty_inventory()
# Index of label to Linode ID
self.index = {}
# Local cache of Datacenter objects populated by populate_datacenter_cache()
self._datacenter_cache = None
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of nodes for inventory
if len(self.inventory) == 1:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
"""Reads the settings from the .ini file."""
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')
# Cache related
cache_path = config.get('linode', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-linode.cache"
self.cache_path_index = cache_path + "/ansible-linode.index"
self.cache_max_age = config.getint('linode', 'cache_max_age')
self.use_public_ip = config.getboolean('linode', 'use_public_ip')
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode')
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific node')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
"""Do API calls, and save data in cache files."""
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
"""Makes an Linode API call to get the list of nodes."""
try:
for node in Linode.search(status=Linode.STATUS_RUNNING):
self.add_node(node)
except chube_api.linode_api.ApiError as e:
sys.exit("Looks like Linode's API is down:\n %s" % e)
def get_node(self, linode_id):
"""Gets details about a specific node."""
try:
return Linode.find(api_id=linode_id)
except chube_api.linode_api.ApiError as e:
sys.exit("Looks like Linode's API is down:\n%s" % e)
def populate_datacenter_cache(self):
"""Creates self._datacenter_cache, containing all Datacenters indexed by ID."""
self._datacenter_cache = {}
dcs = Datacenter.search()
for dc in dcs:
self._datacenter_cache[dc.api_id] = dc
def get_datacenter_city(self, node):
"""Returns a the lowercase city name of the node's data center."""
if self._datacenter_cache is None:
self.populate_datacenter_cache()
location = self._datacenter_cache[node.datacenter_id].location
location = location.lower()
location = location.split(",")[0]
return location
def add_node(self, node):
"""Adds an node to the inventory and index."""
if self.use_public_ip:
dest = self.get_node_public_ip(node)
else:
dest = node.label
# Add to index
self.index[dest] = node.api_id
# Inventory: Group by node ID (always a group of 1)
self.inventory[node.api_id] = [dest]
# Inventory: Group by datacenter city
self.push(self.inventory, self.get_datacenter_city(node), dest)
# Inventory: Group by display group
self.push(self.inventory, node.display_group, dest)
# Inventory: Add a "linode" global tag group
self.push(self.inventory, "linode", dest)
# Add host info to hostvars
self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node)
def get_node_public_ip(self, node):
"""Returns a the public IP address of the node"""
return [addr.address for addr in node.ipaddresses if addr.is_public][0]
def get_host_info(self):
"""Get variables about a specific host."""
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
node_id = self.index[self.args.host]
node = self.get_node(node_id)
return self.json_format_dict(self._get_host_info(node), True)
def _get_host_info(self, node):
node_vars = {}
for direct_attr in [
"api_id",
"datacenter_id",
"label",
"display_group",
"create_dt",
"total_hd",
"total_xfer",
"total_ram",
"status",
"alert_cpu_enabled",
"alert_cpu_threshold",
"alert_diskio_enabled",
"alert_diskio_threshold",
"alert_bwin_enabled",
"alert_bwin_threshold",
"alert_bwout_enabled",
"alert_bwout_threshold",
"alert_bwquota_enabled",
"alert_bwquota_threshold",
"backup_weekly_daily",
"backup_window",
"watchdog"
]:
node_vars[direct_attr] = getattr(node, direct_attr)
node_vars["datacenter_city"] = self.get_datacenter_city(node)
node_vars["public_ip"] = self.get_node_public_ip(node)
# Set the SSH host information, so these inventory items can be used if
# their labels aren't FQDNs
node_vars['ansible_ssh_host'] = node_vars["public_ip"]
node_vars['ansible_host'] = node_vars["public_ip"]
private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]
if private_ips:
node_vars["private_ip"] = private_ips[0]
return node_vars
def push(self, my_dict, key, element):
"""Pushed an element onto an array that may not have been defined in the dict."""
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
"""Reads the inventory from the cache file and returns it as a JSON object."""
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
"""Reads the index from the cache file and sets self.index."""
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
"""Writes data in JSON format to a file."""
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
"""Escapes any characters that would be invalid in an ansible group name."""
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string."""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
LinodeInventory()

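A minimal sketch of the cache-expiry rule implemented by is_cache_valid() above: the cache is fresh while its mtime plus cache_max_age is still in the future (the path and max age below are just examples):

import os
from time import time

def cache_is_fresh(path, max_age):
    """True if `path` exists and was written less than `max_age` seconds ago."""
    return os.path.isfile(path) and (os.path.getmtime(path) + max_age) > time()

print(cache_is_fresh('/tmp/ansible-linode.cache', 300))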
View file

@ -1,60 +0,0 @@
#!/usr/bin/env python
#
# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH
# <florian@hastexo.com>
# Based in part on:
# libvirt_lxc.py, (c) 2013, Michael Scherer <misc@zarb.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Ansible inventory script for LXC containers. Requires Python
bindings for LXC API.
In LXC, containers can be grouped by setting the lxc.group option,
which may be found more than once in a container's
configuration. So, we enumerate all containers, fetch their list
of groups, and then build the dictionary in the way Ansible expects
it.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import lxc
import json
def build_dict():
"""Returns a dictionary keyed to the defined LXC groups. All
containers, including the ones not in any group, are included in the
"all" group."""
# Enumerate all containers, and list the groups they are in. Also,
# implicitly add every container to the 'all' group.
containers = dict([(c,
['all'] +
(lxc.Container(c).get_config_item('lxc.group') or []))
for c in lxc.list_containers()])
# Extract the groups, flatten the list, and remove duplicates
groups = set(sum(containers.values(), []))
# Create a dictionary for each group (including the 'all' group)
return dict([(g, {'hosts': [k for k, v in containers.items() if g in v],
'vars': {'ansible_connection': 'lxc'}}) for g in groups])
def main(argv):
"""Returns a JSON dictionary as expected by Ansible"""
result = build_dict()
if len(argv) == 2 and argv[1] == '--list':
json.dump(result, sys.stdout)
elif len(argv) == 3 and argv[1] == '--host':
json.dump({'ansible_connection': 'lxc'}, sys.stdout)
else:
print("Need an argument, either --list or --host <host>", file=sys.stderr)
if __name__ == '__main__':
main(sys.argv)

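An illustration (container names and groups invented) of the dictionary build_dict() returns when only 'c1' sets lxc.group = web:

import json

containers = {'c1': ['all', 'web'], 'c2': ['all']}
groups = set(sum(containers.values(), []))  # flatten and deduplicate
result = dict((g, {'hosts': [k for k, v in containers.items() if g in v],
                   'vars': {'ansible_connection': 'lxc'}}) for g in groups)
print(json.dumps(result, sort_keys=True))
# 'all' contains c1 and c2; 'web' contains only c1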
View file

@ -1,13 +0,0 @@
# LXD external inventory script settings
[lxd]
# The default resource
#resource = local:
# The group name to add the hosts to
#group = lxd
# The connection type to return for these hosts - lxd hasn't been tested yet
#connection = lxd
connection = smart

View file

@ -1,93 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2014, Hiroaki Nakamura <hnakamur@gmail.com>
# (c) 2016, Andew Clarke <andrew@oscailte.org>
#
# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible,
# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py
#
# NOTE, this file has some obvious limitations, improvements welcome
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from subprocess import Popen, PIPE
import distutils.spawn
import sys
import json
from ansible.module_utils.six.moves import configparser
# Set up defaults
resource = 'local:'
group = 'lxd'
connection = 'lxd'
hosts = {}
result = {}
# Read the settings from the lxd.ini file
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini')
if config.has_option('lxd', 'resource'):
resource = config.get('lxd', 'resource')
if config.has_option('lxd', 'group'):
group = config.get('lxd', 'group')
if config.has_option('lxd', 'connection'):
connection = config.get('lxd', 'connection')
# Ensure executable exists
if distutils.spawn.find_executable('lxc'):
# Set up containers result and hosts array
result[group] = {}
result[group]['hosts'] = []
# Run the command and load json result
pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True)
lxdjson = json.load(pipe.stdout)
# Iterate the json lxd output
for item in lxdjson:
# Check state and network
if 'state' in item and item['state'] is not None and 'network' in item['state']:
network = item['state']['network']
# Check for eth0 and addresses
if 'eth0' in network and 'addresses' in network['eth0']:
addresses = network['eth0']['addresses']
# Iterate addresses
for address in addresses:
# Only return inet family addresses
if 'family' in address and address['family'] == 'inet':
if 'address' in address:
ip = address['address']
name = item['name']
# Add the host to the results and the host array
result[group]['hosts'].append(name)
hosts[name] = ip
# Set the other containers result values
result[group]['vars'] = {}
result[group]['vars']['ansible_connection'] = connection
# Process arguments
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
if sys.argv[2] == 'localhost':
print(json.dumps({'ansible_connection': 'local'}))
else:
if connection == 'lxd':
print(json.dumps({'ansible_connection': connection}))
else:
print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]}))
else:
print("Need an argument, either --list or --host <host>")

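A standalone sketch of the address extraction above, run against one canned `lxc list --format json` record (all values invented):

import json

item = {'name': 'c1',
        'state': {'network': {'eth0': {'addresses': [
            {'family': 'inet6', 'address': 'fd42::1'},
            {'family': 'inet', 'address': '10.0.3.15'}]}}}}

hosts = {}
network = item['state']['network']
for address in network['eth0']['addresses']:
    # only keep IPv4 ('inet') addresses, as the script does
    if address.get('family') == 'inet' and 'address' in address:
        hosts[item['name']] = address['address']
print(json.dumps(hosts))  # {"c1": "10.0.3.15"}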
View file

@ -1,17 +0,0 @@
[mdt]
# Set the MDT server to connect to
server = localhost.example.com
# Set the MDT Instance
instance = EXAMPLEINSTANCE
# Set the MDT database
database = MDTDB
# Configure login credentials
user = local.domain\admin
password = adminpassword
[tower]
groupname = mdt

View file

@ -1,122 +0,0 @@
#!/usr/bin/env python
# (c) 2016, Julian Barnett <jbarnett@tableau.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
MDT external inventory script
=================================
author: J Barnett 06/23/2016 01:15
maintainer: J Barnett (github @jbarnett1981)
'''
import argparse
import json
import pymssql
from ansible.module_utils.six.moves import configparser
class MDTInventory(object):
def __init__(self):
''' Main execution path '''
self.conn = None
# Initialize empty inventory
self.inventory = self._empty_inventory()
# Read CLI arguments
self.read_settings()
self.parse_cli_args()
# Get Hosts
if self.args.list:
self.get_hosts()
# Get specific host vars
if self.args.host:
self.get_hosts(self.args.host)
def _connect(self, query):
'''
Connect to MDT and dump contents of dbo.ComputerIdentity database
'''
if not self.conn:
self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password,
database=self.mdt_database)
cursor = self.conn.cursor()
cursor.execute(query)
self.mdt_dump = cursor.fetchall()
self.conn.close()
def get_hosts(self, hostname=False):
'''
Gets host from MDT Database
'''
if hostname:
query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role "
"FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname)
else:
query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID'
self._connect(query)
# Configure to group name configured in Ansible Tower for this inventory
groupname = self.mdt_groupname
# Initialize empty host list
hostlist = []
# Parse through db dump and populate inventory
for hosts in self.mdt_dump:
self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]}
hostlist.append(hosts[1])
self.inventory[groupname] = hostlist
# Print it all out
print(json.dumps(self.inventory, indent=2))
def _empty_inventory(self):
'''
Create empty inventory dictionary
'''
return {"_meta": {"hostvars": {}}}
def read_settings(self):
'''
Reads the settings from the mdt.ini file
'''
config = configparser.SafeConfigParser()
config.read('mdt.ini')
# MDT Server and instance and database
self.mdt_server = config.get('mdt', 'server')
self.mdt_instance = config.get('mdt', 'instance')
self.mdt_database = config.get('mdt', 'database')
# MDT Login credentials
if config.has_option('mdt', 'user'):
self.mdt_user = config.get('mdt', 'user')
if config.has_option('mdt', 'password'):
self.mdt_password = config.get('mdt', 'password')
# Group name in Tower
if config.has_option('tower', 'groupname'):
self.mdt_groupname = config.get('tower', 'groupname')
def parse_cli_args(self):
'''
Command line argument processing
'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT')
parser.add_argument('--list', action='store_true', default=False, help='List instances')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
if __name__ == "__main__":
# Run the script
MDTInventory()

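An illustration (rows invented) of how the database dump is folded into the inventory structure that get_hosts() prints; the 'mdt' group name comes from the [tower] section of mdt.ini:

import json

mdt_dump = [(1, 'host1', '00:11:22:33:44:55', 'webserver'),
            (2, 'host2', '00:11:22:33:44:66', 'database')]

inventory = {'_meta': {'hostvars': {}}}
hostlist = []
for row in mdt_dump:
    inventory['_meta']['hostvars'][row[1]] = {'id': row[0], 'name': row[1],
                                              'mac': row[2], 'role': row[3]}
    hostlist.append(row[1])
inventory['mdt'] = hostlist
print(json.dumps(inventory, indent=2))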
View file

@ -1,41 +0,0 @@
# Ansible Nagios external inventory script settings
#
# To get all available possibilities, check following URL:
# http://www.naemon.org/documentation/usersguide/livestatus.html
# https://mathias-kettner.de/checkmk_livestatus.html
#
[local]
# Livestatus URI
# Example for default naemon livestatus unix socket :
# livestatus_uri=unix:/var/cache/naemon/live
[remote]
# default field name for host: name
# Uncomment to override:
# host_field=address
#
# default field group for host: groups
# Uncomment to override:
# group_field=state
# default fields retrieved: address, alias, display_name, children, parents
# To override, uncomment the following line
# fields_to_retrieve=address,alias,display_name
#
# default variable prefix: livestatus_
# To override, uncomment the following line
# var_prefix=naemon_
#
# default filter: None
#
# Uncomment to override
#
# All hosts with state = OK
# host_filter=state = 0
# Warning: for the moment, you can only use one filter at a time; you cannot combine multiple conditions.
#
# All hosts in the Linux group
# host_filter=groups >= Linux
#
livestatus_uri=tcp:192.168.66.137:6557

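The two accepted livestatus_uri forms map onto connection definitions as in this standalone sketch, mirroring the regex handling in the accompanying script (the socket path and endpoint are the examples from this file):

import re

for uri in ('unix:/var/cache/naemon/live', 'tcp:192.168.66.137:6557'):
    unix_match = re.match('unix:(.*)', uri)
    tcp_match = re.match('tcp:(.*):([^:]*)', uri)
    if unix_match is not None:
        print('unix socket:', unix_match.group(1))
    elif tcp_match is not None:
        print('tcp endpoint:', (tcp_match.group(1), int(tcp_match.group(2))))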
View file

@ -1,163 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Yannig Perre <yannig.perre@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Nagios livestatus inventory script. Before using this script, please
update the nagios_livestatus.ini file.
Livestatus is a nagios/naemon/shinken module which lets you retrieve
information stored in the monitoring core.
This inventory script needs the livestatus API for Python. Please install it
before using this script (apt/pip/yum/...).
Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html
Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html
'''
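# A minimal sketch of the livestatus round-trip this script performs, assuming
# the python-mk-livestatus package and a local naemon unix socket (path and
# columns hypothetical):
#
#   from mk_livestatus import Socket
#   query = Socket('/var/cache/naemon/live').hosts.columns('name', 'groups')
#   for host in query.call():  # one dict per host, keyed by column name
#       print(host['name'], host['groups'])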
import os
import re
import argparse
import sys
from ansible.module_utils.six.moves import configparser
import json
try:
from mk_livestatus import Socket
except ImportError:
sys.exit("Error: mk_livestatus is needed. Try something like: pip install python-mk-livestatus")
class NagiosLivestatusInventory(object):
def parse_ini_file(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini')
for section in config.sections():
if not config.has_option(section, 'livestatus_uri'):
continue
# If fields_to_retrieve is not set, using default fields
fields_to_retrieve = self.default_fields_to_retrieve
if config.has_option(section, 'fields_to_retrieve'):
fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')]
fields_to_retrieve = tuple(fields_to_retrieve)
# default section values
section_values = {
'var_prefix': 'livestatus_',
'host_filter': None,
'host_field': 'name',
'group_field': 'groups'
}
for key, value in section_values.items():
if config.has_option(section, key):
section_values[key] = config.get(section, key).strip()
# Retrieving livestatus string connection
livestatus_uri = config.get(section, 'livestatus_uri')
backend_definition = None
# Local unix socket
unix_match = re.match('unix:(.*)', livestatus_uri)
if unix_match is not None:
backend_definition = {'connection': unix_match.group(1)}
# Remote tcp connection
tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri)
if tcp_match is not None:
backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))}
# No valid livestatus_uri => exiting
if backend_definition is None:
raise Exception('livestatus_uri field is invalid (%s). Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri)
# Updating backend_definition with current value
backend_definition['name'] = section
backend_definition['fields'] = fields_to_retrieve
for key, value in section_values.items():
backend_definition[key] = value
self.backends.append(backend_definition)
def parse_options(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
parser.add_argument('--pretty', action='store_true')
self.options = parser.parse_args()
def add_host(self, hostname, group):
if group not in self.result:
self.result[group] = {}
self.result[group]['hosts'] = []
if hostname not in self.result[group]['hosts']:
self.result[group]['hosts'].append(hostname)
def query_backend(self, backend, host=None):
'''Query a livestatus backend'''
hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field'])
if backend['host_filter'] is not None:
hosts_request = hosts_request.filter(backend['host_filter'])
if host is not None:
hosts_request = hosts_request.filter('name = ' + host[0])
hosts_request._columns += backend['fields']
hosts = hosts_request.call()
for host in hosts:
hostname = host[backend['host_field']]
hostgroups = host[backend['group_field']]
if not isinstance(hostgroups, list):
hostgroups = [hostgroups]
self.add_host(hostname, 'all')
self.add_host(hostname, backend['name'])
for group in hostgroups:
self.add_host(hostname, group)
for field in backend['fields']:
var_name = backend['var_prefix'] + field
if hostname not in self.result['_meta']['hostvars']:
self.result['_meta']['hostvars'][hostname] = {}
self.result['_meta']['hostvars'][hostname][var_name] = host[field]
def __init__(self):
self.defaultgroup = 'group_all'
self.default_fields_to_retrieve = ('address', 'alias', 'display_name', 'childs', 'parents')
self.backends = []
self.options = None
self.parse_ini_file()
self.parse_options()
self.result = {}
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
self.json_indent = None
if self.options.pretty:
self.json_indent = 2
if len(self.backends) == 0:
sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.")
for backend in self.backends:
self.query_backend(backend, self.options.host)
if self.options.host:
print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent))
elif self.options.list:
print(json.dumps(self.result, indent=self.json_indent))
else:
sys.exit("usage: --list or --host HOSTNAME [--pretty]")
NagiosLivestatusInventory()

View file

@ -1,10 +0,0 @@
# Ansible Nagios external inventory script settings
#
[ndo]
# NDO database URI
# Make sure that data is returned as strings and not bytes if using python 3.
# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html
# for supported databases and URI format.
# Example for mysqlclient module :
database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1

View file

@ -1,95 +0,0 @@
#!/usr/bin/env python
# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Nagios NDO external inventory script.
========================================
Returns hosts and hostgroups from Nagios NDO.
Configuration is read from `nagios_ndo.ini`.
"""
import os
import argparse
import sys
from ansible.module_utils.six.moves import configparser
import json
try:
from sqlalchemy import text
from sqlalchemy.engine import create_engine
except ImportError:
sys.exit("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
class NagiosNDOInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
if config.has_option('ndo', 'database_uri'):
self.ndo_database_uri = config.get('ndo', 'database_uri')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def get_hosts(self):
engine = create_engine(self.ndo_database_uri)
connection = engine.connect()
select_hosts = text("SELECT display_name \
FROM nagios_hosts")
select_hostgroups = text("SELECT alias \
FROM nagios_hostgroups")
select_hostgroup_hosts = text("SELECT h.display_name \
FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
WHERE hgm.hostgroup_id = hg.hostgroup_id \
AND hgm.host_object_id = h.host_object_id \
AND hg.alias =:hostgroup_alias")
hosts = connection.execute(select_hosts)
self.result['all']['hosts'] = [host['display_name'] for host in hosts]
for hostgroup in connection.execute(select_hostgroups):
hostgroup_alias = hostgroup['alias']
self.result[hostgroup_alias] = {}
hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
def __init__(self):
self.defaultgroup = 'group_all'
self.ndo_database_uri = None
self.options = None
self.read_settings()
self.read_cli()
self.result = {}
self.result['all'] = {}
self.result['all']['hosts'] = []
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
if self.ndo_database_uri:
self.get_hosts()
if self.options.host:
print(json.dumps({}))
elif self.options.list:
print(json.dumps(self.result))
else:
sys.exit("usage: --list or --host HOSTNAME")
else:
sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.")
NagiosNDOInventory()

View file

@ -1,346 +0,0 @@
#!/usr/bin/env python
'''
nsot
====
Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
Features
--------
* Define host groups in form of NSoT device attribute criteria
* All parameters defined by the spec as of 2015-09-05 are supported.
+ ``--list``: Returns JSON hash of host groups -> hosts and top-level
``_meta`` -> ``hostvars`` which correspond to all device attributes.
Group vars can be specified in the YAML configuration, noted below.
+ ``--host <hostname>``: Returns JSON hash where every item is a device
attribute.
* In addition to all attributes assigned to resource being returned, script
will also append ``site_id`` and ``id`` as facts to utilize.
Configuration
-------------
Since it'd be annoying and failure prone to guess where your configuration
file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
This file should adhere to the YAML spec. Each top-level key must be the
desired Ansible group name, mapping to a single 'query' item that defines the
NSoT attribute query.
Queries follow the normal NSoT query syntax, `shown here`_
.. _shown here: https://github.com/dropbox/pynsot#set-queries
.. code:: yaml
routers:
query: 'deviceType=ROUTER'
vars:
a: b
c: d
juniper_fw:
query: 'deviceType=FIREWALL manufacturer=JUNIPER'
not_f10:
query: '-manufacturer=FORCE10'
The inventory will automatically use your ``.pynsotrc`` like normal pynsot from
cli would, so make sure that's configured appropriately.
.. note::
Attributes I'm showing above are influenced by ones that the Trigger
project likes. As is the spirit of NSoT, use whichever attributes work best
for your workflow.
If config file is blank or absent, the following default groups will be
created:
* ``routers``: deviceType=ROUTER
* ``switches``: deviceType=SWITCH
* ``firewalls``: deviceType=FIREWALL
These are likely not useful for everyone so please use the configuration. :)
.. note::
By default, resources will only be returned for what your default
site is set for in your ``~/.pynsotrc``.
If you want to specify, add an extra key under the group for ``site: n``.
Output Examples
---------------
Here are some examples shown from just calling the command directly::
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
{
"routers": {
"hosts": [
"test1.example.com"
],
"vars": {
"cool_level": "very",
"group": "routers"
}
},
"firewalls": {
"hosts": [
"test2.example.com"
],
"vars": {
"cool_level": "enough",
"group": "firewalls"
}
},
"_meta": {
"hostvars": {
"test2.example.com": {
"make": "SRX",
"site_id": 1,
"id": 108
},
"test1.example.com": {
"make": "MX80",
"site_id": 1,
"id": 107
}
}
},
"rtr_and_fw": {
"hosts": [
"test1.example.com",
"test2.example.com"
],
"vars": {}
}
}
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
{
"make": "MX80",
"site_id": 1,
"id": 107
}
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
import pkg_resources
import argparse
import json
import yaml
from textwrap import dedent
from pynsot.client import get_api_client
from pynsot.app import HttpServerError
from click.exceptions import UsageError
from ansible.module_utils.six import string_types
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
class NSoTInventory(object):
'''NSoT Client object for gather inventory'''
def __init__(self):
self.config = dict()
config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
if config_env:
try:
config_file = os.path.abspath(config_env)
except IOError: # If file non-existent, use default config
self._config_default()
except Exception as e:
sys.exit('%s\n' % e)
with open(config_file) as f:
try:
self.config.update(yaml.safe_load(f))
except TypeError: # If empty file, use default config
warning('Empty config file')
self._config_default()
except Exception as e:
sys.exit('%s\n' % e)
else: # Use defaults if env var missing
self._config_default()
self.groups = list(self.config.keys())
self.client = get_api_client()
self._meta = {'hostvars': dict()}
def _config_default(self):
default_yaml = '''
---
routers:
query: deviceType=ROUTER
switches:
query: deviceType=SWITCH
firewalls:
query: deviceType=FIREWALL
'''
self.config = yaml.safe_load(dedent(default_yaml))
def do_list(self):
'''Direct callback for when ``--list`` is provided
Relies on the configuration generated from init to run
_inventory_group()
'''
inventory = dict()
for group, contents in self.config.items():
group_response = self._inventory_group(group, contents)
inventory.update(group_response)
inventory.update({'_meta': self._meta})
return json.dumps(inventory)
def do_host(self, host):
return json.dumps(self._hostvars(host))
def _hostvars(self, host):
'''Return dictionary of all device attributes
Depending on number of devices in NSoT, could be rather slow since this
has to request every device resource to filter through
'''
device = [i for i in self.client.devices.get()
if host in i['hostname']][0]
attributes = device['attributes']
attributes.update({'site_id': device['site_id'], 'id': device['id']})
return attributes
def _inventory_group(self, group, contents):
'''Takes a group and returns inventory for it as dict
:param group: Group name
:type group: str
:param contents: The contents of the group's YAML config
:type contents: dict
contents param should look like::
{
'query': 'xx',
'vars':
'a': 'b'
}
Will return something like::
{ group: {
hosts: [],
vars: {},
}
'''
query = contents.get('query')
hostvars = contents.get('vars', dict())
site = contents.get('site', dict())
obj = {group: dict()}
obj[group]['hosts'] = []
obj[group]['vars'] = hostvars
try:
assert isinstance(query, string_types)
except Exception:
sys.exit('ERR: Group queries must be a single string\n'
' Group: %s\n'
' Query: %s\n' % (group, query)
)
try:
if site:
site = self.client.sites(site)
devices = site.devices.query.get(query=query)
else:
devices = self.client.devices.query.get(query=query)
except HttpServerError as e:
if '500' in str(e.response):
_site = 'Correct site id?'
_attr = 'Queried attributes actually exist?'
questions = _site + '\n' + _attr
sys.exit('ERR: 500 from server.\n%s' % questions)
else:
raise
except UsageError:
sys.exit('ERR: Could not connect to server. Running?')
# Would do a list comprehension here, but would like to save code/time
# and also acquire attributes in this step
for host in devices:
# Iterate through each device that matches query, assign hostname
# to the group's hosts array and then use this single iteration as
# a chance to update self._meta which will be used in the final
# return
hostname = host['hostname']
obj[group]['hosts'].append(hostname)
attributes = host['attributes']
attributes.update({'site_id': host['site_id'], 'id': host['id']})
self._meta['hostvars'].update({hostname: attributes})
return obj
def parse_args():
desc = __doc__.splitlines()[4] # Just to avoid being redundant
# Establish parser with options and error out if no action provided
parser = argparse.ArgumentParser(
description=desc,
conflict_handler='resolve',
)
# Arguments
#
# Currently accepting (--list | -l) and (--host | -h)
# These must not be allowed together
parser.add_argument(
'--list', '-l',
help='Print JSON object containing hosts to STDOUT',
action='store_true',
dest='list_', # Avoiding syntax highlighting for list
)
parser.add_argument(
'--host', '-h',
help='Print JSON object containing hostvars for <host>',
action='store',
)
args = parser.parse_args()
if not args.list_ and not args.host: # Require at least one option
parser.exit(status=1, message='No action requested')
if args.list_ and args.host: # Do not allow multiple options
parser.exit(status=1, message='Too many actions requested')
return args
def main():
'''Set up argument handling and callback routing'''
args = parse_args()
client = NSoTInventory()
# Callback condition
if args.list_:
print(client.do_list())
elif args.host:
print(client.do_host(args.host))
if __name__ == '__main__':
main()

View file

@ -1,22 +0,0 @@
---
juniper_routers:
query: 'deviceType=ROUTER manufacturer=JUNIPER'
vars:
group: juniper_routers
netconf: true
os: junos
cisco_asa:
query: 'manufacturer=CISCO deviceType=FIREWALL'
vars:
group: cisco_asa
routed_vpn: false
stateful: true
old_cisco_asa:
query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+'
vars:
old_nat: true
not_f10:
query: '-manufacturer=FORCE10'

View file

@ -1,89 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of Openshift gears using the REST interface
- this permits reusing playbooks to set up an Openshift gear
author: Michael Scherer
'''
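# A rough sketch of the two REST calls made below (broker host hypothetical,
# URL shapes taken from the code):
#
#   GET https://openshift.example.com/broker/rest/domains
#   GET https://openshift.example.com/broker/rest/domains/<id>/applications
#
# Both are sent with 'Accept: application/json; version=1.5' and HTTP basic
# auth, and the JSON payload of interest lives under the 'data' key.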
import json
import os
import os.path
import sys
import StringIO
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves import configparser as ConfigParser
configparser = None
def get_from_rhc_config(variable):
global configparser
CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
if os.path.exists(CONF_FILE):
if not configparser:
ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
configparser = ConfigParser.SafeConfigParser()
configparser.readfp(StringIO.StringIO(ini_str))
try:
return configparser.get('root', variable)
except ConfigParser.NoOptionError:
return None
def get_config(env_var, config_var):
result = os.getenv(env_var)
if not result:
result = get_from_rhc_config(config_var)
if not result:
sys.exit("failed=True msg='missing %s'" % env_var)
return result
def get_json_from_api(url, username, password):
headers = {'Accept': 'application/json; version=1.5'}
response = open_url(url, headers=headers, url_username=username, url_password=password)
return json.loads(response.read())['data']
username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
response = get_json_from_api(broker_url + '/domains', username, password)
response = get_json_from_api("%s/domains/%s/applications" %
(broker_url, response[0]['id']), username, password)
result = {}
for app in response:
# ssh://520311404832ce3e570000ff@blog-johndoe.example.org
(user, host) = app['ssh_url'][6:].split('@')
app_name = host.split('-')[0]
result[app_name] = {}
result[app_name]['hosts'] = []
result[app_name]['hosts'].append(host)
result[app_name]['vars'] = {}
result[app_name]['vars']['ansible_ssh_user'] = user
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({}))
else:
print("Need an argument, either --list or --host <host>")

View file

@ -1,74 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# openvz.py
#
# Copyright 2014 jordonr <jordon@beamsyn.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Inspired by libvirt_lxc.py inventory script
# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
#
# Groups are determined by the description field of openvz guests
# multiple groups can be separated by commas: webserver,dbserver
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from subprocess import Popen, PIPE
import sys
import json
# List openvz hosts
vzhosts = ['vzhost1', 'vzhost2', 'vzhost3']
# Add openvz hosts to the inventory, plus the "_meta" hostvars structure
inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
# default group, when description not defined
default_group = ['vzguest']
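# A small, self-contained sketch of the description-to-groups rule from the
# header comment; _description_to_groups is a hypothetical helper, not part
# of the original script:
def _description_to_groups(description):
    return description.split(",") if description else default_group
# _description_to_groups("webserver,dbserver") -> ['webserver', 'dbserver']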
def get_guests():
# Loop through vzhosts
for h in vzhosts:
# SSH to vzhost and get the list of guests in json
pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True)
# Load Json info of guests
json_data = json.loads(pipe.stdout.read())
# loop through guests
for j in json_data:
# Add information to host vars
inventory['_meta']['hostvars'][j['hostname']] = {
'ctid': j['ctid'],
'veid': j['veid'],
'vpsid': j['vpsid'],
'private_path': j['private'],
'root_path': j['root'],
'ip': j['ip']
}
# determine group from guest description
if j['description'] is not None:
groups = j['description'].split(",")
else:
groups = default_group
# add guest to inventory
for g in groups:
if g not in inventory:
inventory[g] = {'hosts': []}
inventory[g]['hosts'].append(j['hostname'])
return inventory
if len(sys.argv) == 2 and sys.argv[1] == '--list':
inv_json = get_guests()
print(json.dumps(inv_json, sort_keys=True))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({}))
else:
print("Need an argument, either --list or --host <host>")

View file

@ -1,35 +0,0 @@
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Author: Josha Inglis <jinglis@iix.net> based on the gce.ini by Eric Johnson <erjohnso@google.com>
[ovirt]
# For ovirt.py script, which can be used with Python SDK version 3
# Authentication configuration can be stored in a 'secrets.py' file. Ideally,
# the 'secrets.py' file will already exist in your PYTHONPATH and be picked
# up automatically with an import statement in the inventory script. However,
# you can specify an absolute path to the secrets.py file with the
# 'ovirt_api_secrets' parameter.
ovirt_api_secrets =
# If you are not going to use a 'secrets.py' file, you can set the necessary
# authorization parameters here.
ovirt_url =
ovirt_username =
ovirt_password =
ovirt_ca_file =

View file

@ -1,279 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 IIX Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
ovirt external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
oVirt via the ovirt-engine-sdk-python library.
When run against a specific host, this script returns the following variables
based on the data obtained from the ovirt_sdk Node object:
- ovirt_uuid
- ovirt_id
- ovirt_image
- ovirt_machine_type
- ovirt_ips
- ovirt_name
- ovirt_description
- ovirt_status
- ovirt_zone
- ovirt_tags
- ovirt_stats
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- running status:
group name prefixed with 'status_' (e.g. status_up, status_down,..)
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a"
Use the ovirt inventory script to print out instance specific information
$ contrib/inventory/ovirt.py --host my_instance
Author: Josha Inglis <jinglis@iix.net> based on the gce.py by Eric Johnson <erjohnso@google.com>
Version: 0.0.1
"""
USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin"
USER_AGENT_VERSION = "v1"
import sys
import os
import argparse
from collections import defaultdict
from ansible.module_utils.six.moves import configparser as ConfigParser
import json
try:
# noinspection PyUnresolvedReferences
from ovirtsdk.api import API
# noinspection PyUnresolvedReferences
from ovirtsdk.xml import params
except ImportError:
print("ovirt inventory script requires ovirt-engine-sdk-python")
sys.exit(1)
class OVirtInventory(object):
def __init__(self):
# Read settings and parse CLI arguments
self.args = self.parse_cli_args()
self.driver = self.get_ovirt_driver()
# Just display data for specific host
if self.args.host:
print(self.json_format_dict(
self.node_to_dict(self.get_instance(self.args.host)),
pretty=self.args.pretty
))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
print(
self.json_format_dict(
data=self.group_instances(),
pretty=self.args.pretty
)
)
sys.exit(0)
@staticmethod
def get_ovirt_driver():
"""
Determine the ovirt authorization settings and return a ovirt_sdk driver.
:rtype : ovirtsdk.api.API
"""
kwargs = {}
ovirt_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "ovirt.ini")
ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
'ovirt_url': '',
'ovirt_username': '',
'ovirt_password': '',
'ovirt_api_secrets': '',
})
if 'ovirt' not in config.sections():
config.add_section('ovirt')
config.read(ovirt_ini_path)
# Attempt to get ovirt params from a configuration file, if one
# exists.
secrets_path = config.get('ovirt', 'ovirt_api_secrets')
secrets_found = False
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import secrets
kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
secrets_found = True
except ImportError:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py"
print(err)
sys.exit(1)
sys.path.append(os.path.dirname(secrets_path))
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import secrets
kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
secrets_found = True
except ImportError:
pass
if not secrets_found:
kwargs = {
'url': config.get('ovirt', 'ovirt_url'),
'username': config.get('ovirt', 'ovirt_username'),
'password': config.get('ovirt', 'ovirt_password'),
}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url'])
kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None)
kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None)
# Retrieve and return the ovirt driver.
return API(insecure=True, **kwargs)
@staticmethod
def parse_cli_args():
"""
Command line argument processing
:rtype : argparse.Namespace
"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)')
return parser.parse_args()
def node_to_dict(self, inst):
"""
:type inst: params.VM
"""
if inst is None:
return {}
inst.get_custom_properties()
ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \
if inst.get_guest_info() is not None else []
stats = {}
for stat in inst.get_statistics().list():
stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum()
return {
'ovirt_uuid': inst.get_id(),
'ovirt_id': inst.get_id(),
'ovirt_image': inst.get_os().get_type(),
'ovirt_machine_type': self.get_machine_type(inst),
'ovirt_ips': ips,
'ovirt_name': inst.get_name(),
'ovirt_description': inst.get_description(),
'ovirt_status': inst.get_status().get_state(),
'ovirt_zone': inst.get_cluster().get_id(),
'ovirt_tags': self.get_tags(inst),
'ovirt_stats': stats,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ips[0] if len(ips) > 0 else None
}
@staticmethod
def get_tags(inst):
"""
:type inst: params.VM
"""
return [x.get_name() for x in inst.get_tags().list()]
def get_machine_type(self, inst):
inst_type = inst.get_instance_type()
if inst_type:
return self.driver.instancetypes.get(id=inst_type.id).name
# noinspection PyBroadException,PyUnusedLocal
def get_instance(self, instance_name):
"""Gets details about a specific instance """
try:
return self.driver.vms.get(name=instance_name)
except Exception as e:
return None
def group_instances(self):
"""Group all instances"""
groups = defaultdict(list)
meta = {"hostvars": {}}
for node in self.driver.vms.list():
assert isinstance(node, params.VM)
name = node.get_name()
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.get_cluster().get_name()
groups[zone].append(name)
tags = self.get_tags(node)
for t in tags:
tag = 'tag_%s' % t
groups[tag].append(name)
nets = [x.get_name() for x in node.get_nics().list()]
for net in nets:
net = 'network_%s' % net
groups[net].append(name)
status = node.get_status().get_state()
stat = 'status_%s' % status.lower()
if stat in groups:
groups[stat].append(name)
else:
groups[stat] = [name]
groups["_meta"] = meta
return groups
@staticmethod
def json_format_dict(data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted
string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
OVirtInventory()

View file

@ -1,258 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
oVirt dynamic inventory script
=================================
Generates dynamic inventory file for oVirt.
Script will return following attributes for each virtual machine:
- id
- name
- host
- cluster
- status
- description
- fqdn
- os_type
- template
- tags
- statistics
- devices
When run in --list mode, virtual machines are grouped by the following categories:
- cluster
- tag
- status
Note: If a virtual machine has more than one tag, it will appear in each
corresponding tag group.
Examples:
# Execute update of system on webserver virtual machine:
$ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
# Get webserver virtual machine information:
$ contrib/inventory/ovirt4.py --host webserver
Author: Ondra Machacek (@machacekondra)
"""
import argparse
import os
import sys
from collections import defaultdict
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six import PY2
import json
try:
import ovirtsdk4 as sdk
import ovirtsdk4.types as otypes
except ImportError:
print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
sys.exit(1)
def parse_args():
"""
Create command line parser for oVirt dynamic inventory script.
"""
parser = argparse.ArgumentParser(
description='Ansible dynamic inventory script for oVirt.',
)
parser.add_argument(
'--list',
action='store_true',
default=True,
help='Get data of all virtual machines (default: True).',
)
parser.add_argument(
'--host',
help='Get data of virtual machines running on specified host.',
)
parser.add_argument(
'--pretty',
action='store_true',
default=False,
help='Pretty format (default: False).',
)
return parser.parse_args()
def create_connection():
"""
Create a connection to oVirt engine API.
"""
# Get the path of the configuration file, by default use
# 'ovirt.ini' file in script directory:
default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'ovirt.ini',
)
config_path = os.environ.get('OVIRT_INI_PATH', default_path)
# Create parser and add ovirt section if it doesn't exist:
if PY2:
config = configparser.SafeConfigParser(
defaults={
'ovirt_url': os.environ.get('OVIRT_URL'),
'ovirt_username': os.environ.get('OVIRT_USERNAME'),
'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''),
}, allow_no_value=True
)
else:
config = configparser.ConfigParser(
defaults={
'ovirt_url': os.environ.get('OVIRT_URL'),
'ovirt_username': os.environ.get('OVIRT_USERNAME'),
'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''),
}, allow_no_value=True
)
if not config.has_section('ovirt'):
config.add_section('ovirt')
config.read(config_path)
# Create a connection with options defined in ini file:
return sdk.Connection(
url=config.get('ovirt', 'ovirt_url'),
username=config.get('ovirt', 'ovirt_username'),
password=config.get('ovirt', 'ovirt_password', raw=True),
ca_file=config.get('ovirt', 'ovirt_ca_file') or None,
insecure=not config.get('ovirt', 'ovirt_ca_file'),
)
def get_dict_of_struct(connection, vm):
"""
Transform SDK Vm Struct type to Python dictionary.
"""
if vm is None:
return dict()
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
vm_service = vms_service.vm_service(vm.id)
devices = vm_service.reported_devices_service().list()
tags = vm_service.tags_service().list()
stats = vm_service.statistics_service().list()
labels = vm_service.affinity_labels_service().list()
groups = clusters_service.cluster_service(
vm.cluster.id
).affinity_groups_service().list()
return {
'id': vm.id,
'name': vm.name,
'host': connection.follow_link(vm.host).name if vm.host else None,
'cluster': connection.follow_link(vm.cluster).name,
'status': str(vm.status),
'description': vm.description,
'fqdn': vm.fqdn,
'os_type': vm.os.type,
'template': connection.follow_link(vm.template).name,
'tags': [tag.name for tag in tags],
'affinity_labels': [label.name for label in labels],
'affinity_groups': [
group.name for group in groups
if vm.name in [vm.name for vm in connection.follow_link(group.vms)]
],
'statistics': dict(
(stat.name, stat.values[0].datum) for stat in stats if stat.values
),
'devices': dict(
(device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
),
'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
}
def get_data(connection, vm_name=None):
"""
Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
"""
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
if vm_name:
vm = vms_service.list(search='name=%s' % vm_name) or [None]
data = get_dict_of_struct(
connection=connection,
vm=vm[0],
)
else:
vms = dict()
data = defaultdict(list)
for vm in vms_service.list():
name = vm.name
vm_service = vms_service.vm_service(vm.id)
cluster_service = clusters_service.cluster_service(vm.cluster.id)
# Add vm to vms dict:
vms[name] = get_dict_of_struct(connection, vm)
# Add vm to cluster group:
cluster_name = connection.follow_link(vm.cluster).name
data['cluster_%s' % cluster_name].append(name)
# Add vm to tag group:
tags_service = vm_service.tags_service()
for tag in tags_service.list():
data['tag_%s' % tag.name].append(name)
# Add vm to status group:
data['status_%s' % vm.status].append(name)
# Add vm to affinity group:
for group in cluster_service.affinity_groups_service().list():
if vm.name in [
v.name for v in connection.follow_link(group.vms)
]:
data['affinity_group_%s' % group.name].append(vm.name)
# Add vm to affinity label group:
affinity_labels_service = vm_service.affinity_labels_service()
for label in affinity_labels_service.list():
data['affinity_label_%s' % label.name].append(name)
data["_meta"] = {
'hostvars': vms,
}
return data
def main():
args = parse_args()
connection = create_connection()
print(
json.dumps(
obj=get_data(
connection=connection,
vm_name=args.host,
),
sort_keys=args.pretty,
indent=args.pretty * 2,
)
)
if __name__ == '__main__':
main()

View file

@ -1,53 +0,0 @@
# Ansible Packet.net external inventory script settings
#
[packet]
# Packet projects to get info for. Set this to 'all' to get info for all
# projects in Packet and merge the results together. Alternatively, set
# this to a comma separated list of projects. E.g. 'project-1,project-3,project-4'
projects = all
projects_exclude =
# By default, packet devices in all state are returned. Specify
# packet device states to return as a comma-separated list.
# device_states = active, inactive, queued, provisioning
# items per page to retrieve from packet api at a time
items_per_page = 999
# API calls to Packet are costly. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-packet.cache
# - ansible-packet.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace - tags when creating groups to avoid issues with ansible
replace_dash_in_groups = True
# The packet inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_device_id = True
group_by_hostname = True
group_by_facility = True
group_by_project = True
group_by_operating_system = True
group_by_plan_type = True
group_by_tags = True
group_by_tag_none = True
# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*
# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*

View file

@ -1,496 +0,0 @@
#!/usr/bin/env python
'''
Packet.net external inventory script
====================================
Generates inventory that Ansible can understand by making API requests to
Packet.net using the Packet library.
NOTE: This script assumes Ansible is being executed where the environment
variable needed for Packet API Token already been set:
export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
This script also assumes there is a packet_net.ini file alongside it. To specify a
different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable:
export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
'''
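# A minimal sketch of the packet-python calls this script wraps (token value
# hypothetical):
#
#   import packet
#   manager = packet.Manager(auth_token='Bfse9F24SFtfs423Gsd3ifGsd43sSdfs')
#   for project in manager.list_projects(params={'per_page': 999}):
#       print(project.name, project.id)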
# (c) 2016, Peter Sankauskas
# (c) 2017, Tomas Karasek
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
import sys
import os
import argparse
import re
from time import time
from ansible.module_utils import six
from ansible.module_utils.six.moves import configparser
try:
import packet
except ImportError as e:
sys.exit("failed=True msg='`packet-python` library required for this script'")
import traceback
import json
ini_section = 'packet'
class PacketInventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by device IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to device ID
self.index = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of devices for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the packet_net.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
_ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')
if _ini_path_raw:
packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw))
else:
packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
config.read(packet_ini_path)
# items per page
self.items_per_page = 999
if config.has_option(ini_section, 'items_per_page'):
self.items_per_page = config.getint(ini_section, 'items_per_page')
# Instance states to be gathered in inventory. Default is all of them.
packet_valid_device_states = [
'active',
'inactive',
'queued',
'provisioning'
]
self.packet_device_states = []
if config.has_option(ini_section, 'device_states'):
for device_state in config.get(ini_section, 'device_states').split(','):
device_state = device_state.strip()
if device_state not in packet_valid_device_states:
continue
self.packet_device_states.append(device_state)
else:
self.packet_device_states = packet_valid_device_states
# Cache related
cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-packet.cache"
self.cache_path_index = cache_dir + "/ansible-packet.index"
self.cache_max_age = config.getint(ini_section, 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option(ini_section, 'nested_groups'):
self.nested_groups = config.getboolean(ini_section, 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option(ini_section, 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_device_id',
'group_by_hostname',
'group_by_facility',
'group_by_project',
'group_by_operating_system',
'group_by_plan_type',
'group_by_tags',
'group_by_tag_none',
]
for option in group_by_options:
if config.has_option(ini_section, option):
setattr(self, option, config.getboolean(ini_section, option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get(ini_section, 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get(ini_section, 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Projects
self.projects = []
configProjects = config.get(ini_section, 'projects')
configProjects_exclude = config.get(ini_section, 'projects_exclude')
if (configProjects == 'all'):
for projectInfo in self.get_projects():
if projectInfo.name not in configProjects_exclude.split(','):
self.projects.append(projectInfo.name)
else:
self.projects = configProjects.split(",")
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet')
parser.add_argument('--list', action='store_true', default=True,
help='List Devices (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific device')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
for projectInfo in self.get_projects():
if projectInfo.name in self.projects:
self.get_devices_by_project(projectInfo)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self):
''' create connection to api server'''
token = os.environ.get('PACKET_API_TOKEN')
if token is None:
raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
manager = packet.Manager(auth_token=token)
return manager
def get_projects(self):
'''Makes a Packet API call to get the list of projects'''
params = {
'per_page': self.items_per_page
}
try:
manager = self.connect()
projects = manager.list_projects(params=params)
return projects
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet projects')
def get_devices_by_project(self, project):
''' Makes a Packet API call to get the list of devices in a particular
project '''
params = {
'per_page': self.items_per_page
}
try:
manager = self.connect()
devices = manager.list_devices(project_id=project.id, params=params)
for device in devices:
self.add_device(device, project)
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet devices')
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_device(self, device_id):
manager = self.connect()
device = manager.get_device(device_id)
return device
def add_device(self, device, project):
''' Adds a device to the inventory and index, as long as it is
addressable '''
# Only return devices with desired device states
if device.state not in self.packet_device_states:
return
# Select the best destination address. Only include management
# addresses as non-management (elastic) addresses need manual
# host configuration to be routable.
# See https://help.packet.net/article/54-elastic-ips.
dest = None
for ip_address in device.ip_addresses:
if ip_address['public'] is True and \
ip_address['address_family'] == 4 and \
ip_address['management'] is True:
dest = ip_address['address']
if not dest:
# Skip devices we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(device.hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(device.hostname):
return
# Add to index
self.index[dest] = [project.id, device.id]
# Inventory: Group by device ID (always a group of 1)
if self.group_by_device_id:
self.inventory[device.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'devices', device.id)
# Inventory: Group by device name (hopefully a group of 1)
if self.group_by_hostname:
self.push(self.inventory, device.hostname, dest)
if self.nested_groups:
self.push_group(self.inventory, 'hostnames', device.hostname)
# Inventory: Group by project
if self.group_by_project:
self.push(self.inventory, project.name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'projects', project.name)
# Inventory: Group by facility
if self.group_by_facility:
self.push(self.inventory, device.facility['code'], dest)
if self.nested_groups:
self.push_group(self.inventory, project.name, device.facility['code'])
# Inventory: Group by OS
if self.group_by_operating_system:
self.push(self.inventory, device.operating_system.slug, dest)
if self.nested_groups:
self.push_group(self.inventory, 'operating_systems', device.operating_system.slug)
# Inventory: Group by plan type
if self.group_by_plan_type:
self.push(self.inventory, device.plan['slug'], dest)
if self.nested_groups:
self.push_group(self.inventory, 'plans', device.plan['slug'])
# Inventory: Group by tag keys
if self.group_by_tags:
for k in device.tags:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
# Global Tag: devices without tags
if self.group_by_tag_none and len(device.tags) == 0:
self.push(self.inventory, 'tag_none', dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all Packet devices
self.push(self.inventory, 'packet', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
def get_host_info_dict_from_device(self, device):
device_vars = {}
for key in vars(device):
value = getattr(device, key)
key = self.to_safe('packet_' + key)
# Handle complex types
if key == 'packet_state':
device_vars[key] = device.state or ''
elif key == 'packet_hostname':
device_vars[key] = value
elif isinstance(value, (int, bool)):
device_vars[key] = value
elif isinstance(value, six.string_types):
device_vars[key] = value.strip()
elif value is None:
device_vars[key] = ''
elif key == 'packet_facility':
device_vars[key] = value['code']
elif key == 'packet_operating_system':
device_vars[key] = value.slug
elif key == 'packet_plan':
device_vars[key] = value['slug']
elif key == 'packet_tags':
for k in value:
key = self.to_safe('packet_tag_' + k)
device_vars[key] = k
else:
pass
# print key
# print type(value)
# print value
return device_vars
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(project_id, device_id) = self.index[self.args.host]
device = self.get_device(device_id)
return self.json_format_dict(self.get_host_info_dict_from_device(device), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += r"\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
PacketInventory()

View file

@ -1,240 +0,0 @@
#!/usr/bin/env python
# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <gauthierl@lapth.cnrs.fr>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Updated 2016 by Matt Harris <matthaeus.harris@gmail.com>
#
# Added support for Proxmox VE 4.x
# Added support for using the Notes field of a VM to define groups and variables:
# A well-formatted JSON object in the Notes field will be added to the _meta
# section for that VM. In addition, the "groups" key of this JSON object may be
# used to specify group membership:
#
# { "groups": ["utility", "databases"], "a": false, "b": true }
import json
import os
import sys
from optparse import OptionParser
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
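# A small, self-contained sketch of the Notes-field rule described in the
# header comment; _notes_to_metadata is a hypothetical helper mirroring the
# try/except logic in main_list() below:
def _notes_to_metadata(description):
    try:
        return json.loads(description)
    except TypeError:   # no Notes field at all
        return {}
    except ValueError:  # free-form notes that are not JSON
        return {'notes': description}
# _notes_to_metadata('{"groups": ["utility"]}') -> {'groups': ['utility']}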
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
class ProxmoxVM(dict):
def get_variables(self):
variables = {}
for key, value in iteritems(self):
variables['proxmox_' + key] = value
return variables
class ProxmoxVMList(list):
def __init__(self, data=None):
data = [] if data is None else data
for item in data:
self.append(ProxmoxVM(item))
def get_names(self):
return [vm['name'] for vm in self if vm['template'] != 1]
def get_by_name(self, name):
results = [vm for vm in self if vm['name'] == name]
return results[0] if len(results) > 0 else None
def get_variables(self):
variables = {}
for vm in self:
variables[vm['name']] = vm.get_variables()
return variables
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
class ProxmoxPool(dict):
def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object):
def __init__(self, options):
self.options = options
self.credentials = None
if not options.url:
raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
elif not options.username:
raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
elif not options.password:
raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
def auth(self):
request_path = '{0}api2/json/access/ticket'.format(self.options.url)
request_params = urlencode({
'username': self.options.username,
'password': self.options.password,
})
data = json.load(open_url(request_path, data=request_params))
self.credentials = {
'ticket': data['data']['ticket'],
'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
}
def get(self, url, data=None):
request_path = '{0}{1}'.format(self.options.url, url)
headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
request = open_url(request_path, data=data, headers=headers)
response = json.load(request)
return response['data']
def nodes(self):
return ProxmoxNodeList(self.get('api2/json/nodes'))
def vms_by_type(self, node, type):
return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type)))
def vm_description_by_type(self, node, vm, type):
return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm))
def node_qemu(self, node):
return self.vms_by_type(node, 'qemu')
def node_qemu_description(self, node, vm):
return self.vm_description_by_type(node, vm, 'qemu')
def node_lxc(self, node):
return self.vms_by_type(node, 'lxc')
def node_lxc_description(self, node, vm):
return self.vm_description_by_type(node, vm, 'lxc')
def pools(self):
return ProxmoxPoolList(self.get('api2/json/pools'))
def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid)))
def main_list(options):
results = {
'all': {
'hosts': [],
},
'_meta': {
'hostvars': {},
}
}
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
results['all']['hosts'] += qemu_list.get_names()
results['_meta']['hostvars'].update(qemu_list.get_variables())
lxc_list = proxmox_api.node_lxc(node)
results['all']['hosts'] += lxc_list.get_names()
results['_meta']['hostvars'].update(lxc_list.get_variables())
for vm in results['_meta']['hostvars']:
vmid = results['_meta']['hostvars'][vm]['proxmox_vmid']
try:
type = results['_meta']['hostvars'][vm]['proxmox_type']
except KeyError:
type = 'qemu'
try:
description = proxmox_api.vm_description_by_type(node, vmid, type)['description']
except KeyError:
description = None
try:
metadata = json.loads(description)
except TypeError:
metadata = {}
except ValueError:
metadata = {
'notes': description
}
if 'groups' in metadata:
# create or extend the groups listed in the VM metadata
for group in metadata['groups']:
if group not in results:
results[group] = {
'hosts': []
}
results[group]['hosts'] += [vm]
results['_meta']['hostvars'][vm].update(metadata)
# pools
for pool in proxmox_api.pools().get_names():
results[pool] = {
'hosts': proxmox_api.pool(pool).get_members_name(),
}
return results
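# Illustration (assumed convention, based on the parsing above): a VM whose
# Proxmox "Notes" field holds JSON such as
#   {"groups": ["web", "production"], "ansible_host": "10.0.0.5"}
# is placed in the "web" and "production" groups, with the remaining keys
# merged into its hostvars; plain-text notes land under a "notes" hostvar.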
def main_host(options):
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
qemu = qemu_list.get_by_name(options.host)
if qemu:
return qemu.get_variables()
return {}
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list")
parser.add_option('--host', dest="host")
parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
(options, args) = parser.parse_args()
if options.list:
data = main_list(options)
elif options.host:
data = main_host(options)
else:
parser.print_help()
sys.exit(1)
indent = None
if options.pretty:
indent = 2
print(json.dumps(data, indent=indent))
if __name__ == '__main__':
main()
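# Example invocation (hypothetical credentials; the trailing slash on the URL
# matters, since the request paths above are appended directly to it):
#   PROXMOX_URL=https://proxmox.example.com:8006/ PROXMOX_USERNAME=apiuser@pve \
#   PROXMOX_PASSWORD=secret ./proxmox.py --list --pretty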

View file

@ -1,86 +0,0 @@
#!/usr/bin/env python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import requests
import argparse
RACKHD_URL = 'http://localhost:8080'
class RackhdInventory(object):
def __init__(self, nodeids):
self._inventory = {}
for nodeid in nodeids:
self._load_inventory_data(nodeid)
inventory = {}
for (nodeid, info) in self._inventory.items():
inventory[nodeid] = (self._format_output(nodeid, info))
print(json.dumps(inventory))
def _load_inventory_data(self, nodeid):
info = {}
info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
results = {}
for (key, url) in info.items():
r = requests.get(url, verify=False)
results[key] = r.text
self._inventory[nodeid] = results
def _format_output(self, nodeid, info):
output = {} # ensure a defined value is returned even if lookup data is missing
try:
node_info = json.loads(info['lookup'])
ipaddress = ''
if len(node_info) > 0:
ipaddress = node_info[0]['ipAddress']
output = {'hosts': [ipaddress], 'vars': {}}
for (key, result) in info.items():
output['vars'][key] = json.loads(result)
output['vars']['ansible_ssh_user'] = 'monorail'
except KeyError:
pass
return output
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
return parser.parse_args()
try:
# Check if a RackHD URL (e.g. 10.1.1.45:8080) is specified in the environment
RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
except Exception:
# use default values
pass
# Use the nodeid specified in the environment to limit the data returned
# or return data for all available nodes
nodeids = []
args = parse_args()
if args.host:
try:
nodeids += args.host.split(',')
RackhdInventory(nodeids)
except Exception:
pass
if args.list:
try:
url = RACKHD_URL + '/api/common/nodes'
r = requests.get(url, verify=False)
data = json.loads(r.text)
for entry in data:
if entry['type'] == 'compute':
nodeids.append(entry['id'])
RackhdInventory(nodeids)
except Exception:
pass
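# Example invocation (assumed endpoint; RACKHD_URL is host:port, the http://
# prefix is added above):
#   RACKHD_URL=10.1.1.45:8080 ./rackhd.py --list
#   RACKHD_URL=10.1.1.45:8080 ./rackhd.py --host <nodeid1>,<nodeid2>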

View file

@ -1,66 +0,0 @@
# Ansible Rackspace external inventory script settings
#
[rax]
# Environment Variable: RAX_CREDS_FILE
#
# An optional configuration that points to a pyrax-compatible credentials
# file.
#
# If not supplied, rax.py will look for a credentials file
# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
# and therefore requires a file formatted per the SDK's specifications.
#
# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
# creds_file = ~/.rackspace_cloud_credentials
# Environment Variable: RAX_REGION
#
# An optional environment variable to narrow inventory search
# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
# datacenter) and optionally accepts a comma-separated list.
# regions = IAD,ORD,DFW
# Environment Variable: RAX_ENV
#
# A configuration that will use an environment as configured in
# ~/.pyrax.cfg, see
# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
# env = prod
# Environment Variable: RAX_META_PREFIX
# Default: meta
#
# A configuration that changes the prefix used for meta key/value groups.
# For compatibility with ec2.py set to "tag"
# meta_prefix = meta
# Environment Variable: RAX_ACCESS_NETWORK
# Default: public
#
# A configuration that will tell the inventory script to use a specific
# server network to determine the ansible_ssh_host value. If no address
# is found, ansible_ssh_host will not be set. Accepts a comma-separated
# list of network names, the first found wins.
# access_network = public
# Environment Variable: RAX_ACCESS_IP_VERSION
# Default: 4
#
# A configuration related to "access_network" that will attempt to
# determine the ansible_ssh_host value for either IPv4 or IPv6. If no
# address is found, ansible_ssh_host will not be set.
# Acceptable values are: 4 or 6. Values other than 4 or 6
# will be ignored, and 4 will be used. Accepts a comma separated list,
# the first found wins.
# access_ip_version = 4
# Environment Variable: RAX_CACHE_MAX_AGE
# Default: 600
#
# A configuration that changes the behavior of the inventory cache.
# Inventory listing performed before this value will be returned from
# the cache instead of making a full request for all inventory. Setting
# this value to 0 will force a full request.
# cache_max_age = 600

View file

@ -1,460 +0,0 @@
#!/usr/bin/env python
# (c) 2013, Jesse Keating <jesse.keating@rackspace.com>,
# Paul Durivage <paul.durivage@rackspace.com>,
# Matt Martz <matt@sivel.net>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Rackspace Cloud Inventory
Authors:
Jesse Keating <jesse.keating@rackspace.com>,
Paul Durivage <paul.durivage@rackspace.com>,
Matt Martz <matt@sivel.net>
Description:
Generates inventory that Ansible can understand by making API request to
Rackspace Public Cloud API
When run against a specific host, this script returns variables similar to:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
Configuration:
rax.py can be configured using a rax.ini file or via environment
variables. The rax.ini file should live in the same directory along side
this script.
The section header for configuration values related to this
inventory plugin is [rax]
[rax]
creds_file = ~/.rackspace_cloud_credentials
regions = IAD,ORD,DFW
env = prod
meta_prefix = meta
access_network = public
access_ip_version = 4
Each of these configurations also has a corresponding environment variable.
An environment variable will override a configuration file value.
creds_file:
Environment Variable: RAX_CREDS_FILE
An optional configuration that points to a pyrax-compatible credentials
file.
If not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
and therefore requires a file formatted per the SDK's specifications.
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
regions:
Environment Variable: RAX_REGION
An optional environment variable to narrow inventory search
scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list.
environment:
Environment Variable: RAX_ENV
A configuration that will use an environment as configured in
~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
meta_prefix:
Environment Variable: RAX_META_PREFIX
Default: meta
A configuration that changes the prefix used for meta key/value groups.
For compatibility with ec2.py set to "tag"
access_network:
Environment Variable: RAX_ACCESS_NETWORK
Default: public
A configuration that will tell the inventory script to use a specific
server network to determine the ansible_ssh_host value. If no address
is found, ansible_ssh_host will not be set. Accepts a comma-separated
list of network names, the first found wins.
access_ip_version:
Environment Variable: RAX_ACCESS_IP_VERSION
Default: 4
A configuration related to "access_network" that will attempt to
determine the ansible_ssh_host value for either IPv4 or IPv6. If no
address is found, ansible_ssh_host will not be set.
Acceptable values are: 4 or 6. Values other than 4 or 6
will be ignored, and 4 will be used. Accepts a comma-separated list,
the first found wins.
Examples:
List server instances
$ RAX_CREDS_FILE=~/.raxpub rax.py --list
List servers in ORD datacenter only
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
List servers in ORD and DFW datacenters
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
Get server details for server named "server.example.com"
$ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
Use the instance private IP to connect (instead of public IP)
$ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list
"""
import os
import re
import sys
import argparse
import warnings
import collections
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
import json
try:
import pyrax
from pyrax.utils import slugify
except ImportError:
sys.exit('pyrax is required for this module')
from time import time
from ansible.constants import get_config
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import text_type
NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None))
def load_config_file():
p = ConfigParser.ConfigParser()
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'rax.ini')
try:
p.read(config_file)
except ConfigParser.Error:
return None
else:
return p
def rax_slugify(value):
return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
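# e.g. rax_slugify('OS-EXT-STS:task_state') -> 'rax_os-ext-sts_task_state'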
def to_dict(obj):
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
key = rax_slugify(key)
instance[key] = value
return instance
def host(regions, hostname):
hostvars = {}
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
if server.name == hostname:
for key, value in to_dict(server).items():
hostvars[key] = value
# And finally, add an IP address
hostvars['ansible_ssh_host'] = server.accessIPv4
print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list_into_cache(regions):
groups = collections.defaultdict(list)
hostvars = collections.defaultdict(dict)
images = {}
cbs_attachments = collections.defaultdict(dict)
prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')
try:
# Ansible 2.3+
networks = get_config(p, 'rax', 'access_network',
'RAX_ACCESS_NETWORK', 'public', value_type='list')
except TypeError:
# Ansible 2.2.x and below
# pylint: disable=unexpected-keyword-arg
networks = get_config(p, 'rax', 'access_network',
'RAX_ACCESS_NETWORK', 'public', islist=True)
try:
try:
# Ansible 2.3+
ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
except TypeError:
# Ansible 2.2.x and below
# pylint: disable=unexpected-keyword-arg
ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
'RAX_ACCESS_IP_VERSION', 4, islist=True))
except Exception:
ip_versions = [4]
else:
ip_versions = [v for v in ip_versions if v in [4, 6]]
if not ip_versions:
ip_versions = [4]
# Go through all the regions looking for servers
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
if cs is None:
warnings.warn(
'Connecting to Rackspace region "%s" has caused Pyrax to '
'return None. Is this a valid region?' % region,
RuntimeWarning)
continue
for server in cs.servers.list():
# Create a group on region
groups[region].append(server.name)
# Check if group metadata key in servers' metadata
group = server.metadata.get('group')
if group:
groups[group].append(server.name)
for extra_group in server.metadata.get('groups', '').split(','):
if extra_group:
groups[extra_group].append(server.name)
# Add host metadata
for key, value in to_dict(server).items():
hostvars[server.name][key] = value
hostvars[server.name]['rax_region'] = region
for key, value in iteritems(server.metadata):
groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
groups['instance-%s' % server.id].append(server.name)
groups['flavor-%s' % server.flavor['id']].append(server.name)
# Handle boot from volume
if not server.image:
if not cbs_attachments[region]:
cbs = pyrax.connect_to_cloud_blockstorage(region)
for vol in cbs.list():
if boolean(vol.bootable, strict=False):
for attachment in vol.attachments:
metadata = vol.volume_image_metadata
server_id = attachment['server_id']
cbs_attachments[region][server_id] = {
'id': metadata['image_id'],
'name': slugify(metadata['image_name'])
}
image = cbs_attachments[region].get(server.id)
if image:
server.image = {'id': image['id']}
hostvars[server.name]['rax_image'] = server.image
hostvars[server.name]['rax_boot_source'] = 'volume'
images[image['id']] = image['name']
else:
hostvars[server.name]['rax_boot_source'] = 'local'
try:
imagegroup = 'image-%s' % images[server.image['id']]
groups[imagegroup].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
except KeyError:
try:
image = cs.images.get(server.image['id'])
except cs.exceptions.NotFound:
groups['image-%s' % server.image['id']].append(server.name)
else:
images[image.id] = image.human_id
groups['image-%s' % image.human_id].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
# And finally, add an IP address
ansible_ssh_host = None
# use accessIPv[46] instead of looping address for 'public'
for network_name in networks:
if ansible_ssh_host:
break
if network_name == 'public':
for version_name in ip_versions:
if ansible_ssh_host:
break
if version_name == 6 and server.accessIPv6:
ansible_ssh_host = server.accessIPv6
elif server.accessIPv4:
ansible_ssh_host = server.accessIPv4
if not ansible_ssh_host:
addresses = server.addresses.get(network_name, [])
for address in addresses:
for version_name in ip_versions:
if ansible_ssh_host:
break
if address.get('version') == version_name:
ansible_ssh_host = address.get('addr')
break
if ansible_ssh_host:
hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host
if hostvars:
groups['_meta'] = {'hostvars': hostvars}
with open(get_cache_file_path(regions), 'w') as cache_file:
json.dump(groups, cache_file)
def get_cache_file_path(regions):
regions_str = '.'.join([reg.strip().lower() for reg in regions])
ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp')
if not os.path.exists(ansible_tmp_path):
os.makedirs(ansible_tmp_path)
return os.path.join(ansible_tmp_path,
'ansible-rax-%s-%s.cache' % (
pyrax.identity.username, regions_str))
def _list(regions, refresh_cache=True):
cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
'RAX_CACHE_MAX_AGE', 600))
if (not os.path.exists(get_cache_file_path(regions)) or
refresh_cache or
(time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age):
# Cache file doesn't exist or older than 10m or refresh cache requested
_list_into_cache(regions)
with open(get_cache_file_path(regions), 'r') as cache_file:
groups = json.load(cache_file)
print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help=('Force refresh of cache, making API requests to '
'RackSpace (default: False - use cache files)'))
return parser.parse_args()
def setup():
default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
if env:
pyrax.set_environment(env)
keyring_username = pyrax.get_setting('keyring_username')
# Attempt to grab credentials from environment first
creds_file = get_config(p, 'rax', 'creds_file',
'RAX_CREDS_FILE', None)
if creds_file is not None:
creds_file = os.path.expanduser(creds_file)
else:
# But if that fails, use the default location of
# ~/.rackspace_cloud_credentials
if os.path.isfile(default_creds_file):
creds_file = default_creds_file
elif not keyring_username:
sys.exit('No value in environment variable %s and/or no '
'credentials file at %s'
% ('RAX_CREDS_FILE', default_creds_file))
identity_type = pyrax.get_setting('identity_type')
pyrax.set_setting('identity_type', identity_type or 'rackspace')
region = pyrax.get_setting('region')
try:
if keyring_username:
pyrax.keyring_auth(keyring_username, region=region)
else:
pyrax.set_credential_file(creds_file, region=region)
except Exception as e:
sys.exit("%s: %s" % (e, e.message))
regions = []
if region:
regions.append(region)
else:
try:
# Ansible 2.3+
region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
value_type='list')
except TypeError:
# Ansible 2.2.x and below
# pylint: disable=unexpected-keyword-arg
region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
islist=True)
for region in region_list:
region = region.strip().upper()
if region == 'ALL':
regions = pyrax.regions
break
elif region not in pyrax.regions:
sys.exit('Unsupported region %s' % region)
elif region not in regions:
regions.append(region)
return regions
def main():
args = parse_args()
regions = setup()
if args.list:
_list(regions, refresh_cache=args.refresh_cache)
elif args.host:
host(regions, args.host)
sys.exit(0)
p = load_config_file()
if __name__ == '__main__':
main()

View file

@ -1 +0,0 @@
ovirt4.py

View file

@ -1,35 +0,0 @@
# Rudder external inventory script settings
#
[rudder]
# Your Rudder server API URL, typically:
# https://rudder.local/rudder/api
uri = https://rudder.local/rudder/api
# By default, Rudder uses a self-signed certificate. Set this to True
# to disable certificate validation.
disable_ssl_certificate_validation = True
# Your Rudder API token, created in the Web interface.
token = aaabbbccc
# Rudder API version to use, use "latest" for latest available
# version.
version = latest
# Property to use as group name in the output.
# Can generally be "id" or "displayName".
group_name = displayName
# Fail if there are two groups with the same name or two hosts with the
# same hostname in the output.
fail_if_name_collision = True
# We cache the results of Rudder API in a local file
cache_path = /tmp/ansible-rudder.cache
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# Set to 0 to disable cache.
cache_max_age = 500

View file

@ -1,286 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2015, Normation SAS
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
'''
Rudder external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
a Rudder server. This script is compatible with Rudder 2.10 or later.
The output JSON includes all your Rudder groups, containing the hostnames of
their nodes. Groups and nodes have a variable called rudder_group_id and
rudder_node_id, the Rudder internal id of the item, which identifies
them uniquely. Host variables also include your node properties, which are
key => value properties set by the API and specific to each node.
This script assumes there is a rudder.ini file alongside it. To specify a
different path to rudder.ini, define the RUDDER_INI_PATH environment variable:
export RUDDER_INI_PATH=/path/to/my_rudder.ini
You have to configure your Rudder server information, either in rudder.ini or
by overriding it with environment variables:
export RUDDER_API_VERSION='latest'
export RUDDER_API_TOKEN='my_token'
export RUDDER_API_URI='https://rudder.local/rudder/api'
'''
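# Example invocation (hypothetical values):
#   RUDDER_API_URI=https://rudder.local/rudder/api RUDDER_API_TOKEN=my_token \
#   ./rudder.py --list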
import sys
import os
import re
import argparse
import httplib2 as http
from time import time
from ansible.module_utils import six
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six.moves.urllib.parse import urlparse
import json
class RudderInventory(object):
def __init__(self):
''' Main execution path '''
# Empty inventory by default
self.inventory = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Create connection
self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation)
# Cache
if self.args.refresh_cache:
self.update_cache()
elif not self.is_cache_valid():
self.update_cache()
else:
self.load_cache()
data_to_print = {}
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
data_to_print = self.get_list_info()
print(self.json_format_dict(data_to_print, True))
def read_settings(self):
''' Reads the settings from the rudder.ini file '''
if six.PY2:
config = configparser.SafeConfigParser()
else:
config = configparser.ConfigParser()
rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini')
rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path)))
config.read(rudder_ini_path)
self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token'))
self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version'))
self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri'))
self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation')
self.group_name = config.get('rudder', 'group_name')
self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision')
self.cache_path = config.get('rudder', 'cache_path')
self.cache_max_age = config.getint('rudder', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)')
self.args = parser.parse_args()
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path):
mod_time = os.path.getmtime(self.cache_path)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
def load_cache(self):
''' Reads the cache from the cache file sets self.cache '''
with open(self.cache_path, 'r') as cache:
json_cache = cache.read()
try:
self.inventory = json.loads(json_cache)
except ValueError:
self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache')
def write_cache(self):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(self.inventory, True)
cache = open(self.cache_path, 'w')
cache.write(json_data)
cache.close()
def get_nodes(self):
''' Gets the nodes list from Rudder '''
path = '/nodes?select=nodeAndPolicyServer'
result = self.api_call(path)
nodes = {}
for node in result['data']['nodes']:
nodes[node['id']] = {}
nodes[node['id']]['hostname'] = node['hostname']
if 'properties' in node:
nodes[node['id']]['properties'] = node['properties']
else:
nodes[node['id']]['properties'] = []
return nodes
def get_groups(self):
''' Gets the groups list from Rudder '''
path = '/groups'
result = self.api_call(path)
groups = {}
for group in result['data']['groups']:
groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])}
return groups
def update_cache(self):
''' Fetches the inventory information from Rudder and creates the inventory '''
nodes = self.get_nodes()
groups = self.get_groups()
inventory = {}
for group in groups:
# Check for name collision
if self.fail_if_name_collision:
if groups[group]['name'] in inventory:
self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups')
# Add group to inventory
inventory[groups[group]['name']] = {}
inventory[groups[group]['name']]['hosts'] = []
inventory[groups[group]['name']]['vars'] = {}
inventory[groups[group]['name']]['vars']['rudder_group_id'] = group
for node in groups[group]['hosts']:
# Add node to group
inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname'])
properties = {}
for node in nodes:
# Check for name collision
if self.fail_if_name_collision:
if nodes[node]['hostname'] in properties:
self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts')
# Add node properties to inventory
properties[nodes[node]['hostname']] = {}
properties[nodes[node]['hostname']]['rudder_node_id'] = node
for node_property in nodes[node]['properties']:
properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value']
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = properties
self.inventory = inventory
if self.cache_max_age > 0:
self.write_cache()
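# Illustration (hypothetical data): a Rudder group "web-servers" (id
# "b9a0...") containing node "node1.example.com" produces, via to_safe()
# below, roughly:
#   {"web_servers": {"hosts": ["node1.example.com"],
#                    "vars": {"rudder_group_id": "b9a0..."}},
#    "_meta": {"hostvars": {"node1.example.com": {"rudder_node_id": "..."}}}}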
def get_list_info(self):
''' Gets inventory information from local cache '''
return self.inventory
def get_host_info(self, hostname):
''' Gets information about a specific host from local cache '''
if hostname in self.inventory['_meta']['hostvars']:
return self.inventory['_meta']['hostvars'][hostname]
else:
return {}
def api_call(self, path):
''' Performs an API request '''
headers = {
'X-API-Token': self.token,
'X-API-Version': self.version,
'Content-Type': 'application/json;charset=utf-8'
}
target = urlparse(self.uri + path)
method = 'GET'
body = ''
try:
response, content = self.conn.request(target.geturl(), method, body, headers)
except Exception:
self.fail_with_error('Error connecting to Rudder server')
try:
data = json.loads(content)
except ValueError:
self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response')
return data
def fail_with_error(self, err_msg, err_operation=None):
''' Logs an error to std err for ansible-playbook to consume and exit '''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
used as Ansible variable names '''
return re.sub(r'[^A-Za-z0-9\_]', '_', word)
# Run the script
RudderInventory()

View file

@ -1,37 +0,0 @@
# Ansible dynamic inventory script for Scaleway cloud provider
#
[compute]
# Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable
#
# regions = all
# regions = ams1
# regions = par1, ams1
regions = par1
# Define a Scaleway token to perform required queries on the API
# in order to generate inventory output.
#
[auth]
# Token to authenticate with Scaleway's API.
# If not defined will read the SCALEWAY_TOKEN environment variable
#
api_token = mysecrettoken
# To avoid performing excessive calls to Scaleway API you can define a
# cache for the plugin output. Within the time defined in seconds, latest
# output will be reused. After that time, the cache will be refreshed.
#
[cache]
cache_max_age = 60
cache_dir = ~/.ansible/tmp
[defaults]
# You may want to use only public IP addresses or private IP addresses.
# You can set public_ip_only configuration to get public IPs only.
# If not defined defaults to retrieving private IP addresses.
#
public_ip_only = false

View file

@ -1,220 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Scaleway
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to Scaleway API
Requires the requests library (pip install requests, https://pypi.org/project/requests/); make sure it is installed before using this script.
Before using this script you may want to modify scaleway.ini config file.
This script generates an Ansible hosts file with these host groups:
<hostname>: Defines the host itself, with Scaleway's hostname as group name.
<tag>: Contains all hosts which have "<tag>" as tag.
<region>: Contains all hosts which are in the "<region>" region.
all: Contains all hosts defined in Scaleway.
'''
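# Example --list output shape (illustrative values only):
#   {"scaleway": {"children": ["web-1"], "hosts": []},
#    "par1": {"children": ["web-1"], "hosts": []},
#    "web-1": ["10.1.2.3"]}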
# (c) 2017, Paul B. <paul@bonaud.fr>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import os
import requests
from ansible.module_utils import six
from ansible.module_utils.six.moves import configparser
import sys
import time
import traceback
import json
EMPTY_GROUP = {
'children': [],
'hosts': []
}
class ScalewayAPI:
REGIONS = ['par1', 'ams1']
def __init__(self, auth_token, region):
self.session = requests.session()
self.session.headers.update({
'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0])
})
self.session.headers.update({
'X-Auth-Token': auth_token.encode('latin1')
})
self.base_url = 'https://cp-%s.scaleway.com' % (region)
def servers(self):
raw = self.session.get('/'.join([self.base_url, 'servers']))
try:
response = raw.json()
return self.get_resource('servers', response, raw)
except ValueError:
return []
def get_resource(self, resource, response, raw):
raw.raise_for_status()
if resource in response:
return response[resource]
else:
raise ValueError(
"Resource %s not found in Scaleway API response" % (resource))
def env_or_param(env_key, param=None, fallback=None):
env_value = os.environ.get(env_key)
if (param, env_value) == (None, None):
return fallback
elif env_value is not None:
return env_value
else:
return param
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError:
pass # cache write failures are non-fatal; the data is simply not cached
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r')
inv = cache.read()
cache.close()
except IOError:
pass # cache read failures are non-fatal; an empty inventory is returned
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat(
'/'.join([dpath, 'scaleway_ansible_inventory.json']))
except OSError:
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
else:
maxage = 60
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
def generate_inv_from_api(config):
try:
inventory['scaleway'] = copy.deepcopy(EMPTY_GROUP)
auth_token = None
if config.has_option('auth', 'api_token'):
auth_token = config.get('auth', 'api_token')
auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token)
if auth_token is None:
sys.stderr.write('ERROR: missing authentication token for Scaleway API')
sys.exit(1)
if config.has_option('compute', 'regions'):
regions = config.get('compute', 'regions')
if regions == 'all':
regions = ScalewayAPI.REGIONS
else:
regions = map(str.strip, regions.split(','))
else:
regions = [
env_or_param('SCALEWAY_REGION', fallback='par1')
]
for region in regions:
api = ScalewayAPI(auth_token, region)
for server in api.servers():
hostname = server['hostname']
if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'):
ip = server['public_ip']['address']
else:
ip = server['private_ip']
for server_tag in server['tags']:
if server_tag not in inventory:
inventory[server_tag] = copy.deepcopy(EMPTY_GROUP)
inventory[server_tag]['children'].append(hostname)
if region not in inventory:
inventory[region] = copy.deepcopy(EMPTY_GROUP)
inventory[region]['children'].append(hostname)
inventory['scaleway']['children'].append(hostname)
inventory[hostname] = []
inventory[hostname].append(ip)
return inventory
except Exception:
# Return empty hosts output
traceback.print_exc()
return {'scaleway': {'hosts': []}, '_meta': {'hostvars': {}}}
def get_inventory(config):
''' Reads the inventory from cache or Scaleway api '''
if cache_available(config):
inv = get_cache('scaleway_ansible_inventory.json', config)
else:
inv = generate_inv_from_api(config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
# Read config
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
if cache_available(config):
inventory = get_cache('scaleway_ansible_inventory.json', config)
else:
inventory = get_inventory(config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()

View file

@ -1,101 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.org/project/serfclient/
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
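# Example invocation (assumed address; 127.0.0.1:7373 is Serf's default RPC
# address):
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --list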
import argparse
import collections
import os
import sys
# https://pypi.org/project/serfclient/
from serfclient import SerfClient, EnvironmentConfig
import json
_key = 'serf'
def _serf_client():
env = EnvironmentConfig()
return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
def get_serf_members_data():
return _serf_client().members().body['Members']
def get_nodes(data):
return [node['Name'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for key, value in node['Tags'].items():
groups[value].append(node['Name'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['Name']] = node['Tags']
return meta
def print_list():
data = get_serf_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_serf_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from serf cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from serf cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])

View file

@ -1,196 +0,0 @@
#!/usr/bin/env python
"""
SoftLayer external inventory script.
The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
You have a few different options for configuring your username and api_key. You can pass
environment variables (SL_USERNAME and SL_API_KEY). You can also write an INI file to
~/.softlayer or /etc/softlayer.conf. For more information see the SL API at:
- https://softlayer-python.readthedocs.io/en/latest/config_file.html
The SoftLayer Python client has a built in command for saving this configuration file
via the command `sl config setup`.
"""
# Copyright (C) 2014 AJ Bourg <aj@ajbourg.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# I found the structure of the ec2.py script very helpful as an example
# as I put this together. Thanks to whoever wrote that script!
#
import SoftLayer
import re
import argparse
import itertools
import json
class SoftLayerInventory(object):
common_items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'datacenter',
'tagReferences',
'userData.value',
]
vs_items = [
'lastKnownPowerState.name',
'powerState',
'maxCpu',
'maxMemory',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
hw_items = [
'hardwareStatusId',
'processorPhysicalCoreAmount',
'memoryCapacity',
]
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
'''Main path'''
self.inventory = self._empty_inventory()
self.parse_options()
if self.args.list:
self.get_all_servers()
print(self.json_format_dict(self.inventory, True))
elif self.args.host:
self.get_all_servers()
print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
def push(self, my_dict, key, element):
'''Push an element onto an array that may not have been defined in the dict'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def parse_options(self):
'''Parse all the arguments from the CLI'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
parser.add_argument('--list', action='store_true', default=False,
help='List instances (default: False)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
'''Converts a dict to a JSON object and dumps it as a formatted string'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def process_instance(self, instance, instance_type="virtual"):
'''Populate the inventory dictionary with any instance information'''
# only want active instances
if 'status' in instance and instance['status']['name'] != 'Active':
return
# and powered on instances
if 'powerState' in instance and instance['powerState']['name'] != 'Running':
return
# 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
return
# if there's no IP address, we can't reach it
if 'primaryIpAddress' not in instance:
return
instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''
dest = instance['primaryIpAddress']
instance['tags'] = list()
for tag in instance['tagReferences']:
instance['tags'].append(tag['tag']['name'])
del instance['tagReferences']
self.inventory["_meta"]["hostvars"][dest] = instance
# Inventory: group by memory
if 'maxMemory' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
elif 'memoryCapacity' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)
# Inventory: group by cpu count
if 'maxCpu' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
elif 'processorPhysicalCoreAmount' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)
# Inventory: group by datacenter
self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)
# Inventory: group by hostname
self.push(self.inventory, self.to_safe(instance['hostname']), dest)
# Inventory: group by FQDN
self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)
# Inventory: group by domain
self.push(self.inventory, self.to_safe(instance['domain']), dest)
# Inventory: group by type (hardware/virtual)
self.push(self.inventory, instance_type, dest)
for tag in instance['tags']:
self.push(self.inventory, tag, dest)
def get_virtual_servers(self):
'''Get all the CCI instances'''
vs = SoftLayer.VSManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
instances = vs.list_instances(mask=mask)
for instance in instances:
self.process_instance(instance)
def get_physical_servers(self):
'''Get all the hardware instances'''
hw = SoftLayer.HardwareManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
instances = hw.list_hardware(mask=mask)
for instance in instances:
self.process_instance(instance, 'hardware')
def get_all_servers(self):
self.client = SoftLayer.Client()
self.get_virtual_servers()
self.get_physical_servers()
SoftLayerInventory()

View file

@ -1,16 +0,0 @@
# Put this ini-file in the same directory as spacewalk.py
# Command line options have precedence over options defined in here.
[spacewalk]
# To limit the script on one organization in spacewalk, uncomment org_number
# and fill in the organization ID:
# org_number=2
# To prefix the group names with the organization ID set prefix_org_name=true.
# This is convenient when org_number is not set and you have the same group names
# in multiple organizations within spacewalk
# The prefix is "org_number-"
prefix_org_name=false
# Default cache_age for files created with spacewalk-report is 300sec.
cache_age=300

View file

@ -1,226 +0,0 @@
#!/usr/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacewalk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <jonEbird@gmail.com>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <bernhard.lichtinger@lrz.de> 2015
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
import time
from optparse import OptionParser
import subprocess
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
base_dir = os.path.dirname(os.path.realpath(__file__))
default_ini_file = os.path.join(base_dir, "spacewalk.ini")
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
# Sanity check
if not os.path.exists(SW_REPORT):
print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
os.chmod(CACHE_DIR, 0o2775)
# Helper functions
# ------------------------------
def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified
spacewalk-report
"""
cache_filename = os.path.join(CACHE_DIR, name)
if not os.path.exists(cache_filename) or \
(time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
# Update the cache
fh = open(cache_filename, 'w')
p = subprocess.Popen([SW_REPORT, name], stdout=fh)
p.wait()
fh.close()
with open(cache_filename, 'r') as f:
lines = f.readlines()
keys = lines[0].strip().split(',')
# add 'spacewalk_' prefix to the keys
keys = ['spacewalk_' + key for key in keys]
for line in lines[1:]:
values = line.strip().split(',')
if len(keys) == len(values):
yield dict(zip(keys, values))
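# Illustration (assumed report columns): a cached report containing
#   profile_name,hostname
#   web01,web01.example.com
# yields {'spacewalk_profile_name': 'web01',
#         'spacewalk_hostname': 'web01.example.com'}.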
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
default=False, action="store_true",
help="Produce a friendlier version of either server list or host detail")
parser.add_option('-o', '--org', default=None, dest="org_number",
help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()
# read spacewalk.ini if present
# ------------------------------
if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser()
config.read(INI_FILE)
if config.has_option('spacewalk', 'cache_age'):
CACHE_AGE = config.getint('spacewalk', 'cache_age') # getint: the age is compared against elapsed seconds above
if not options.org_number and config.has_option('spacewalk', 'org_number'):
options.org_number = config.get('spacewalk', 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id
# ------------------------------
org_groups = {}
try:
for group in spacewalk_report('system-groups'):
org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError) as e:
print('Problem executing the command "%s system-groups": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
# List out the known server from Spacewalk
# ------------------------------
if options.list:
# to build the "_meta"-Group with hostvars first create dictionary for later use
host_vars = {}
try:
for item in spacewalk_report('inventory'):
host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
groups = {}
meta = {"hostvars": {}}
try:
for system in spacewalk_report('system-groups-systems'):
# first get org_id of system
org_id = org_groups[system['spacewalk_group_id']]
# shall we add the org_id as prefix to the group name:
if options.prefix_org_name:
prefix = org_id + "-"
group_name = prefix + system['spacewalk_group_name']
else:
group_name = system['spacewalk_group_name']
# if we are limited to one organization:
if options.org_number:
if org_id == options.org_number:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
# or we list all groups and systems:
else:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
except (OSError) as e:
print('Problem executing the command "%s system-groups-systems": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
for group, systems in iteritems(groups):
print('[%s]\n%s\n' % (group, '\n'.join(systems)))
else:
final = dict([(k, list(s)) for k, s in iteritems(groups)])
final["_meta"] = meta
print(json.dumps(final))
# print(json.dumps(groups))
sys.exit(0)
# Return detailed information concerning the spacewalk server
# ------------------------------
elif options.host:
host_details = {}
try:
for system in spacewalk_report('inventory'):
if system['spacewalk_hostname'] == options.host:
host_details = system
break
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
print('Host: %s' % options.host)
for k, v in iteritems(host_details):
print(' %s: %s' % (k, '\n '.join(v.split(';'))))
else:
print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
sys.exit(0)
else:
parser.print_help()
sys.exit(1)

View file

@ -1,121 +0,0 @@
#!/usr/bin/env python
# (c) 2014, Tomas Karasek <tomas.karasek@digile.fi>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Dynamic inventory script which lets you use aliases from ~/.ssh/config.
#
# There were some issues with various Paramiko versions. I took a deeper look
# and tested heavily. Now, ansible parses this alright with Paramiko versions
# 1.7.2 to 1.15.2.
#
# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts
# with their alias, rather than with the IP or hostname. It takes advantage
# of the ansible_ssh_{host,port,user,private_key_file}.
#
# If you have in your .ssh/config:
# Host git
# HostName git.domain.org
# User tkarasek
# IdentityFile /home/tomk/keys/thekey
#
# You can do
# $ ansible git -m ping
#
# Example invocation:
# ssh_config.py --list
# ssh_config.py --host <alias>
import argparse
import os.path
import sys
import json
import paramiko
from ansible.module_utils.common._collections_compat import MutableSequence
SSH_CONF = '~/.ssh/config'
_key = 'ssh_config'
_ssh_to_ansible = [('user', 'ansible_ssh_user'),
('hostname', 'ansible_ssh_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_ssh_port')]
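# e.g. the "Host git" entry from the header comment above would yield the
# hostvars {'ansible_ssh_user': 'tkarasek',
#           'ansible_ssh_host': 'git.domain.org',
#           'ansible_ssh_private_key_file': '/home/tomk/keys/thekey'}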
def get_config():
if not os.path.isfile(os.path.expanduser(SSH_CONF)):
return {}
with open(os.path.expanduser(SSH_CONF)) as f:
cfg = paramiko.SSHConfig()
cfg.parse(f)
ret_dict = {}
for d in cfg._config:
if isinstance(d['host'], MutableSequence):
alias = d['host'][0]
else:
alias = d['host']
if ('?' in alias) or ('*' in alias):
continue
_copy = dict(d)
del _copy['host']
if 'config' in _copy:
ret_dict[alias] = _copy['config']
else:
ret_dict[alias] = _copy
return ret_dict
def print_list():
cfg = get_config()
meta = {'hostvars': {}}
for alias, attributes in cfg.items():
tmp_dict = {}
for ssh_opt, ans_opt in _ssh_to_ansible:
if ssh_opt in attributes:
# If the attribute is a list, just take the first element.
# Private key is returned in a list for some reason.
attr = attributes[ssh_opt]
if isinstance(attr, MutableSequence):
attr = attr[0]
tmp_dict[ans_opt] = attr
if tmp_dict:
meta['hostvars'][alias] = tmp_dict
print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta}))
def print_host(host):
cfg = get_config()
print(json.dumps(cfg[host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script parsing .ssh/config')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from .ssh/config inventory'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])

View file

@ -1,180 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2016, Hugh Ma <hugh.ma@flextronics.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Stacki inventory script
# Configure stacki.yml with proper auth information and place in the following:
# - ../inventory/stacki.yml
# - /etc/stacki/stacki.yml
# - /etc/ansible/stacki.yml
# The stacki.yml file can contain entries for authentication information
# regarding the Stacki front-end node.
#
# use_hostnames uses hostname rather than interface ip as connection
#
#
"""
Example Usage:
List Stacki Nodes
$ ./stack.py --list
Example Configuration:
---
stacki:
auth:
stacki_user: admin
stacki_password: abc12345678910
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false
"""
import argparse
import os
import sys
import yaml
from distutils.version import StrictVersion
import json
try:
import requests
except Exception:
sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
def stack_auth(params):
endpoint = params['stacki_endpoint']
auth_creds = {'USERNAME': params['stacki_user'],
'PASSWORD': params['stacki_password']}
client = requests.session()
client.get(endpoint)
init_csrf = client.cookies['csrftoken']
header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf,
'Content-type': 'application/x-www-form-urlencoded'}
login_endpoint = endpoint + "/login"
login_req = client.post(login_endpoint, data=auth_creds, headers=header)
csrftoken = login_req.cookies['csrftoken']
sessionid = login_req.cookies['sessionid']
auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid)
return client, auth_creds
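# Note: the handshake above follows the Django-style CSRF pattern that the
# cookie names (csrftoken, sessionid) suggest: fetch an initial csrftoken,
# log in with it, then reuse the post-login csrftoken and sessionid for
# authenticated API calls.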
def stack_build_header(auth_creds):
header = {'csrftoken': auth_creds['CSRFTOKEN'],
'X-CSRFToken': auth_creds['CSRFTOKEN'],
'sessionid': auth_creds['SESSIONID'],
'Content-type': 'application/json'}
return header
def stack_host_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
headers=header)
return json.loads(stack_r.json())
def stack_net_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
headers=header)
return json.loads(stack_r.json())
def format_meta(hostdata, intfdata, config):
use_hostnames = config['use_hostnames']
meta = dict(all=dict(hosts=list()),
frontends=dict(hosts=list()),
backends=dict(hosts=list()),
_meta=dict(hostvars=dict()))
# Iterate through list of dicts of hosts and remove
# environment key as it causes conflicts
for host in hostdata:
del host['environment']
meta['_meta']['hostvars'][host['host']] = host
meta['_meta']['hostvars'][host['host']]['interfaces'] = list()
# @bbyhuy to improve readability in next iteration
for intf in intfdata:
if intf['host'] in meta['_meta']['hostvars']:
meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf)
if intf['default'] is True:
meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip']
if not use_hostnames:
meta['all']['hosts'].append(intf['ip'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['ip'])
else:
meta['frontends']['hosts'].append(intf['ip'])
else:
meta['all']['hosts'].append(intf['host'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['host'])
else:
meta['frontends']['hosts'].append(intf['host'])
return meta
def parse_args():
parser = argparse.ArgumentParser(description='Stacki Inventory Module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active hosts')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
sys.exit('requests>=2.4.3 is required for this inventory script')
try:
config_files = CONFIG_FILES
config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml')
config = None
for cfg_file in config_files:
if os.path.isfile(cfg_file):
stream = open(cfg_file, 'r')
config = yaml.safe_load(stream)
break
if not config:
sys.stderr.write("No config file found at {0}\n".format(config_files))
sys.exit(1)
client, auth_creds = stack_auth(config['stacki']['auth'])
header = stack_build_header(auth_creds)
host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
final_meta = format_meta(host_list, intf_list, config)
print(json.dumps(final_meta, indent=4))
except Exception as e:
sys.stderr.write('%s\n' % e)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()

View file

@ -1,7 +0,0 @@
---
stacki:
auth:
stacki_user: admin
stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false

View file

@ -1,123 +0,0 @@
#!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_path = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <mark@compoundtheory.com>
# 2015 Igor Khomyakov <homyakov@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import os.path
import subprocess
import re
from paramiko import SSHConfig
from optparse import OptionParser
from collections import defaultdict
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import StringIO
_group = 'vagrant' # a default group
_ssh_to_ansible = [('user', 'ansible_user'),
('hostname', 'ansible_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_port')]
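# For example, a `vagrant ssh-config` stanza containing 'HostName 127.0.0.1'
# and 'Port 2222' maps to {'ansible_host': '127.0.0.1', 'ansible_port': '2222'}
# (illustrative values; paramiko returns them as strings).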
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
# list all the running boxes
def list_running_boxes():
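# A `vagrant status` row looks like 'default   running (virtualbox)' (name and
# provider vary); keep the first column of every row in the running state.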
output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
boxes = []
for line in output:
matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
# get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
config = SSHConfig()
config.parse(StringIO(output))
host_config = config.lookup(box_name)
# man 5 ssh_config:
# > It is possible to have multiple identity files ...
# > all these identities will be tried in sequence.
for identity in host_config['identityfile']:
if os.path.isfile(identity):
host_config['identityfile'] = identity
return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
# List out servers that vagrant has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
meta = defaultdict(dict)
for host in ssh_config:
meta['hostvars'][host] = ssh_config[host]
print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
print(json.dumps(get_a_ssh_config(options.host)))
sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(0)

View file

@ -1,107 +0,0 @@
#!/usr/bin/env python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from subprocess import Popen, PIPE
import json
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
VBOX = "VBoxManage"
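# `VBoxManage list -l vms` emits colon-separated 'Key: value' blocks per VM,
# roughly like this (abridged, illustrative):
#
# Name:            testvm
# Groups:          /testgroup
# Memory size:     1024MB
#
# The parser below folds these into per-host variables prefixed with 'vbox_'.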
def get_hosts(host=None):
returned = {}
try:
if host:
p = Popen([VBOX, 'showvminfo', host], stdout=PIPE, universal_newlines=True)
else:
returned = {'all': set(), '_metadata': {}}
p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE, universal_newlines=True)
except Exception:
sys.exit(1)
hostvars = {}
prevkey = pref_k = ''
for line in p.stdout.readlines():
try:
k, v = line.split(':', 1)
except Exception:
continue
if k == '':
continue
v = v.strip()
if k.startswith('Name'):
if v not in hostvars:
curname = v
hostvars[curname] = {}
try: # try to get network info
x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE, universal_newlines=True)
ipinfo = x.stdout.read()
if 'Value' in ipinfo:
a, ip = ipinfo.split(':', 1)
hostvars[curname]['ansible_ssh_host'] = ip.strip()
except Exception:
pass
continue
if not host:
if k == 'Groups':
for group in v.split('/'):
if group:
if group not in returned:
returned[group] = set()
returned[group].add(curname)
returned['all'].add(curname)
continue
pref_k = 'vbox_' + k.strip().replace(' ', '_')
if k.startswith(' '):
if prevkey not in hostvars[curname]:
hostvars[curname][prevkey] = {}
hostvars[curname][prevkey][pref_k] = v
else:
if v != '':
hostvars[curname][pref_k] = v
prevkey = pref_k
if not host:
returned['_metadata']['hostvars'] = hostvars
else:
returned = hostvars[host]
return returned
if __name__ == '__main__':
inventory = {}
hostname = None
if len(sys.argv) > 1:
if sys.argv[1] == "--host":
hostname = sys.argv[2]
if hostname:
inventory = get_hosts(hostname)
else:
inventory = get_hosts()
sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder))

View file

@ -1,33 +0,0 @@
#!/usr/bin/env python
# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
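# `zoneadm list -ip` prints one machine-parsable, colon-delimited line per
# installed zone (-i: include installed zones, -p: parsable output); the
# global zone is filtered out below.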
pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = []
for l in pipe.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
s = l.split(':')
if s[1] != 'global':
result['all']['hosts'].append(s[1])
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'zone'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'zone'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")

View file

@ -1,10 +0,0 @@
[azure_keyvault] # Used with Azure KeyVault
vault_name=django-keyvault
secret_name=vaultpw
secret_version=9k1e6c7367b33eac8ee241b3698009f3
[azure] # Used by Dynamic Inventory
group_by_resource_group=yes
group_by_location=yes
group_by_security_group=yes
group_by_tag=yes

View file

@ -1,595 +0,0 @@
#!/usr/bin/env python
#
# This script borrows a great deal of code from the azure_rm.py dynamic inventory script
# that is packaged with Ansible. This can be found in the Ansible GitHub project at:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/azure_rm.py
#
# The Azure Dynamic Inventory script was written by:
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
# Altered/Added for Vault functionality:
# Austin Hobbs, GitHub: @OxHobbs
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Ansible Vault Password with Azure Key Vault Secret Script
=========================================================
This script is designed to be used with Ansible Vault. It provides the
capability to provide this script as the password file to the ansible-vault
command. This script uses the Azure Python SDK. For instructions on installing
the Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials for all
authentication parameters.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
- cloud_environment
- adfs_authority_url
- vault-name
- secret-name
- secret-version
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
- AZURE_CLOUD_ENVIRONMENT
- AZURE_ADFS_AUTHORITY_URL
- AZURE_VAULT_NAME
- AZURE_VAULT_SECRET_NAME
- AZURE_VAULT_SECRET_VERSION
Vault
-----
The order of precedence of Azure Key Vault Secret information is the same.
Command line arguments, environment variables, and finally the azure_vault.ini
file with the [azure_keyvault] section.
azure_vault.ini (or azure_rm.ini if merged with Azure Dynamic Inventory Script)
------------------------------------------------------------------------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_vault.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_vault') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_VAULT_INI_PATH environment variable:
export AZURE_VAULT_INI_PATH=/path/to/custom.ini
or
export AZURE_VAULT_INI_PATH=[same path as azure_rm.ini if merged]
__NOTE__: If using the azure_rm.py dynamic inventory script, it is possible to use the same .ini
file for both the azure_rm dynamic inventory and the azure_vault password file. Simply add a section
named [azure_keyvault] to the ini file with the following properties: vault_name, secret_name and
secret_version.
Examples:
---------
Validate the vault_pw script with Python
$ python azure_vault.py -n mydjangovault -s vaultpw -v 6b6w7f7252b44eac8ee726b3698009f3
$ python azure_vault.py --vault-name 'mydjangovault' --secret-name 'vaultpw' \
--secret-version 6b6w7f7252b44eac8ee726b3698009f3
Use with a playbook
$ ansible-playbook -i ./azure_rm.py my_playbook.yml --limit galaxy-qa --vault-password-file ./azure_vault.py
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
- Austin Hobbs (@OxHobbs)
Company: Ansible by Red Hat, Microsoft
Version: 0.1.0
'''
import argparse
import os
import re
import sys
import inspect
from azure.keyvault import KeyVaultClient
from ansible.module_utils.six.moves import configparser as cp
from os.path import expanduser
import ansible.module_utils.six.moves.urllib.parse as urlparse
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
CLIError = None
try:
from msrestazure.azure_active_directory import AADTokenCredentials
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure import azure_cloud
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.subscriptions import SubscriptionClient
from azure.mgmt.compute import ComputeManagementClient
from adal.authentication_context import AuthenticationContext
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
CLIError = Exception
try:
from ansible.release import __version__ as ansible_version
except ImportError:
ansible_version = 'unknown'
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
)
AZURE_VAULT_SETTINGS = dict(
vault_name='AZURE_VAULT_NAME',
secret_name='AZURE_VAULT_SECRET_NAME',
secret_version='AZURE_VAULT_SECRET_VERSION',
)
AZURE_MIN_VERSION = "2.0.0"
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
class AzureRM(object):
def __init__(self, args):
self._args = args
self._cloud_environment = None
self._compute_client = None
self._resource_client = None
self._network_client = None
self._adfs_authority_url = None
self._vault_client = None
self._resource = None
self.debug = False
if args.debug:
self.debug = True
self.credentials = self._get_credentials(args)
if not self.credentials:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")
# if cloud_environment specified, look up/build Cloud object
raw_cloud_env = self.credentials.get('cloud_environment')
if not raw_cloud_env:
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
else:
# try to look up "well-known" values via the name attribute on azure_cloud members
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
if len(matched_clouds) == 1:
self._cloud_environment = matched_clouds[0]
elif len(matched_clouds) > 1:
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(
raw_cloud_env))
else:
if not urlparse.urlparse(raw_cloud_env).scheme:
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format(
[x.name for x in all_clouds]))
try:
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
except Exception as e:
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))
if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
# get authentication authority
# for adfs, user could pass in authority or not.
# for others, use default authority from cloud environment
if self.credentials.get('adfs_authority_url'):
self._adfs_authority_url = self.credentials.get('adfs_authority_url')
else:
self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
# get resource from cloud environment
self._resource = self._cloud_environment.endpoints.active_directory_resource_id
if self.credentials.get('credentials'):
self.azure_credentials = self.credentials.get('credentials')
elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment)
elif self.credentials.get('ad_user') is not None and \
self.credentials.get('password') is not None and \
self.credentials.get('client_id') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = self.acquire_token_with_username_password(
self._adfs_authority_url,
self._resource,
self.credentials['ad_user'],
self.credentials['password'],
self.credentials['client_id'],
self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
tenant = self.credentials.get('tenant')
if not tenant:
tenant = 'common'
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment)
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password, or "
"ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, "
"or be logged in using AzureCLI.")
def log(self, msg):
if self.debug:
print(msg + u'\n')
def fail(self, msg):
raise Exception(msg)
def _get_profile(self, profile="default"):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = cp.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except Exception:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
return env_credentials
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_msi_credentials(self, subscription_id_param=None):
credentials = MSIAuthentication()
try:
# try to get the subscription in MSI to test whether MSI is enabled
subscription_client = SubscriptionClient(credentials)
subscription = next(subscription_client.subscriptions.list())
subscription_id = str(subscription.subscription_id)
return {
'credentials': credentials,
'subscription_id': subscription_id_param or subscription_id
}
except Exception as exc:
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
if arg_credentials['ad_user'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~/.azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
if msi_credentials:
self.log('Retrieved credentials from MSI.')
return msi_credentials
try:
if HAS_AZURE_CLI_CORE:
self.log('Retrieving credentials from AzureCLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as ce:
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
return None
def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
authority_uri = authority
if tenant is not None:
authority_uri = authority + '/' + tenant
context = AuthenticationContext(authority_uri)
token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
return AADTokenCredentials(token_response)
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("You might need to register {0} using an admin account".format(key))
self.log(("To register a provider using the Python CLI: "
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
def get_mgmt_svc_client(self, client_type, base_url, api_version):
client = client_type(self.azure_credentials,
self.subscription_id,
base_url=base_url,
api_version=api_version)
client.config.add_user_agent(ANSIBLE_USER_AGENT)
return client
def get_vault_client(self):
return KeyVaultClient(self.azure_credentials)
def get_vault_suffix(self):
return self._cloud_environment.suffixes.keyvault_dns
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-06-01')
self._register('Microsoft.Network')
return self._network_client
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-05-10')
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
self._cloud_environment.endpoints.resource_manager,
'2017-03-30')
self._register('Microsoft.Compute')
return self._compute_client
@property
def vault_client(self):
self.log('Getting the Key Vault client')
if not self._vault_client:
self._vault_client = self.get_vault_client()
return self._vault_client
class AzureKeyVaultSecret:
def __init__(self):
self._args = self._parse_cli_args()
try:
rm = AzureRM(self._args)
except Exception as e:
sys.exit("{0}".format(str(e)))
self._get_vault_settings()
if self._args.vault_name:
self.vault_name = self._args.vault_name
if self._args.secret_name:
self.secret_name = self._args.secret_name
if self._args.secret_version:
self.secret_version = self._args.secret_version
self._vault_suffix = rm.get_vault_suffix()
self._vault_client = rm.vault_client
print(self.get_password_from_vault())
def _parse_cli_args(self):
parser = argparse.ArgumentParser(
description='Obtain the vault password used to secure your Ansible secrets'
)
parser.add_argument('-n', '--vault-name', action='store', help='Name of Azure Key Vault')
parser.add_argument('-s', '--secret-name', action='store',
help='Name of the secret stored in Azure Key Vault')
parser.add_argument('-v', '--secret-version', action='store',
help='Version of the secret to be retrieved')
parser.add_argument('--debug', action='store_true', default=False,
help='Send the debug messages to STDOUT')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad_user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--adfs_authority_url', action='store',
help='Azure ADFS authority url')
parser.add_argument('--cloud_environment', action='store',
help='Azure Cloud Environment name or metadata discovery URL')
return parser.parse_args()
def get_password_from_vault(self):
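# The vault URL is the vault name plus the cloud-specific DNS suffix, e.g.
# https://mydjangovault.vault.azure.net on public Azure (the suffix comes
# from the resolved cloud_environment).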
vault_url = 'https://{0}{1}'.format(self.vault_name, self._vault_suffix)
secret = self._vault_client.get_secret(vault_url, self.secret_name, self.secret_version)
return secret.value
def _get_vault_settings(self):
env_settings = self._get_vault_env_settings()
if None not in set(env_settings.values()):
for key in AZURE_VAULT_SETTINGS:
setattr(self, key, env_settings.get(key, None))
else:
file_settings = self._load_vault_settings()
if not file_settings:
return
for key in AZURE_VAULT_SETTINGS:
if file_settings.get(key):
setattr(self, key, file_settings.get(key))
def _get_vault_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_VAULT_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_vault_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_VAULT_INI_PATH', default_path)))
config = None
settings = None
try:
config = cp.ConfigParser()
config.read(path)
except Exception:
pass
if config is not None:
settings = dict()
for key in AZURE_VAULT_SETTINGS:
try:
settings[key] = config.get('azure_keyvault', key, raw=True)
except Exception:
pass
return settings
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(
AZURE_MIN_VERSION, HAS_AZURE_EXC))
AzureKeyVaultSecret()
if __name__ == '__main__':
main()

View file

@ -1,134 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <matt@sivel.net>
# (c) 2016, Justin Mayer <https://justinmayer.com/>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# =============================================================================
#
# This script is to be used with ansible-vault's --vault-id arg
# to retrieve the vault password via your OS's native keyring application.
#
# This file *MUST* be saved with executable permissions. Otherwise, Ansible
# will try to parse as a password file and display: "ERROR! Decryption failed"
#
# The `keyring` Python module is required: https://pypi.org/project/keyring/
#
# By default, this script will store the specified password in the keyring of
# the user that invokes the script. To specify a user keyring, add a [vault]
# section to your ansible.cfg file with a 'username' option. Example:
#
# [vault]
# username = 'ansible-vault'
#
# In usage like:
#
# ansible-vault --vault-id keyring_id@contrib/vault/vault-keyring-client.py view some_encrypted_file
#
# --vault-id will call this script like:
#
# contrib/vault/vault-keyring-client.py --vault-id keyring_id
#
# That will retrieve the password from the user's keyring for the
# keyring service 'keyring_id'. The equivalent of:
#
# keyring get keyring_id $USER
#
# If no vault-id name is specified on the ansible command line, the vault-keyring-client.py
# script will be called without '--vault-id' and will default to the keyring service 'ansible'.
# This is equivalent to:
#
# keyring get ansible $USER
#
# You can configure the `vault_password_file` option in ansible.cfg:
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring-client.py
# ...
#
# To set your password, `cd` to your project directory and run:
#
# # will use default keyring service / vault-id of 'ansible'
# /path/to/vault-keyring-client.py --set
#
# or to specify the keyring service / vault-id of 'my_ansible_secret':
#
# /path/to/vault-keyring-client.py --vault-id my_ansible_secret --set
#
# If you choose not to configure the path to `vault_password_file` in
# ansible.cfg, your `ansible-playbook` command might look like:
#
# ansible-playbook --vault-id=keyring_id@/path/to/vault-keyring-client.py site.yml
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import sys
import getpass
import keyring
from ansible.config.manager import ConfigManager
KEYNAME_UNKNOWN_RC = 2
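# Distinct exit code for 'secret not found in the keyring', so callers can
# tell a missing key apart from the generic failures that exit 1.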
def build_arg_parser():
parser = argparse.ArgumentParser(description='Get a vault password from user keyring')
parser.add_argument('--vault-id', action='store', default=None,
dest='vault_id',
help='name of the vault secret to get from keyring')
parser.add_argument('--username', action='store', default=None,
help='the username whose keyring is queried')
parser.add_argument('--set', action='store_true', default=False,
dest='set_password',
help='set the password instead of getting it')
return parser
def main():
config_manager = ConfigManager()
username = config_manager.data.get_setting('vault.username')
if not username:
username = getpass.getuser()
keyname = config_manager.data.get_setting('vault.keyname')
if not keyname:
keyname = 'ansible'
arg_parser = build_arg_parser()
args = arg_parser.parse_args()
username = args.username or username
keyname = args.vault_id or keyname
# print('username: %s keyname: %s' % (username, keyname))
if args.set_password:
intro = 'Storing password in "{}" user keyring using key name: {}\n'
sys.stdout.write(intro.format(username, keyname))
password = getpass.getpass()
confirm = getpass.getpass('Confirm password: ')
if password == confirm:
keyring.set_password(keyname, username, password)
else:
sys.stderr.write('Passwords do not match\n')
sys.exit(1)
else:
secret = keyring.get_password(keyname, username)
if secret is None:
sys.stderr.write('vault-keyring-client could not find key="%s" for user="%s" via backend="%s"\n' %
(keyname, username, keyring.get_keyring().name))
sys.exit(KEYNAME_UNKNOWN_RC)
# print('secret: %s' % secret)
sys.stdout.write('%s\n' % secret)
sys.exit(0)
if __name__ == '__main__':
main()

View file

@ -1,87 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <matt@sivel.net>
# (c) 2016, Justin Mayer <https://justinmayer.com/>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# =============================================================================
#
# This script is to be used with vault_password_file or --vault-password-file
# to retrieve the vault password via your OS's native keyring application.
#
# This file *MUST* be saved with executable permissions. Otherwise, Ansible
# will try to parse as a password file and display: "ERROR! Decryption failed"
#
# The `keyring` Python module is required: https://pypi.org/project/keyring/
#
# By default, this script will store the specified password in the keyring of
# the user that invokes the script. To specify a user keyring, add a [vault]
# section to your ansible.cfg file with a 'username' option. Example:
#
# [vault]
# username = 'ansible-vault'
#
# Another optional setting is for the key name, which allows you to use this
# script to handle multiple project vaults with different passwords:
#
# [vault]
# keyname = 'ansible-vault-yourproject'
#
# You can configure the `vault_password_file` option in ansible.cfg:
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring.py
# ...
#
# To set your password, `cd` to your project directory and run:
#
# python /path/to/vault-keyring.py set
#
# If you choose not to configure the path to `vault_password_file` in
# ansible.cfg, your `ansible-playbook` command might look like:
#
# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import getpass
import keyring
from ansible.config.manager import ConfigManager, get_ini_config_value
def main():
config = ConfigManager()
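# Read the optional [vault] section straight from the active ansible.cfg,
# falling back to the login user and the 'ansible' key name.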
username = get_ini_config_value(
config._parsers[config._config_file],
dict(section='vault', key='username')
) or getpass.getuser()
keyname = get_ini_config_value(
config._parsers[config._config_file],
dict(section='vault', key='keyname')
) or 'ansible'
if len(sys.argv) == 2 and sys.argv[1] == 'set':
intro = 'Storing password in "{}" user keyring using key name: {}\n'
sys.stdout.write(intro.format(username, keyname))
password = getpass.getpass()
confirm = getpass.getpass('Confirm password: ')
if password == confirm:
keyring.set_password(keyname, username, password)
else:
sys.stderr.write('Passwords do not match\n')
sys.exit(1)
else:
sys.stdout.write('{0}\n'.format(keyring.get_password(keyname,
username)))
sys.exit(0)
if __name__ == '__main__':
main()

View file

@ -1,3 +0,0 @@
shippable/cloud/group1
cloud/foreman
needs/file/scripts/inventory/foreman.py

View file

@ -1,10 +0,0 @@
#!/usr/bin/env bash
# Wrapper to use the correct Python interpreter and support code coverage.
REL_SCRIPT="../../../../scripts/inventory/foreman.py"
ABS_SCRIPT="$("${ANSIBLE_TEST_PYTHON_INTERPRETER}" -c "import os; print(os.path.abspath('${REL_SCRIPT}'))")"
# Make sure output written to current directory ends up in the temp dir.
cd "${OUTPUT_DIR}"
python.py "${ABS_SCRIPT}" "$@"

View file

@ -1,50 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
export FOREMAN_HOST="${FOREMAN_HOST:-localhost}"
export FOREMAN_PORT="${FOREMAN_PORT:-8080}"
export FOREMAN_INI_PATH="${OUTPUT_DIR}/foreman.ini"
############################################
# SMOKETEST WITH SIMPLE INI
############################################
cat > "$FOREMAN_INI_PATH" <<FOREMAN_INI
[foreman]
url = http://${FOREMAN_HOST}:${FOREMAN_PORT}
user = ansible-tester
password = secure
ssl_verify = False
use_reports_api = False
FOREMAN_INI
# use ansible to validate the return data
if ! ansible-playbook -i foreman.sh test_foreman_inventory.yml --connection=local; then
echo "foreman inventory script smoketest failed"
exit 1
fi
############################################
# SMOKETEST WITH NON-ASCII INI
############################################
cat > "$FOREMAN_INI_PATH" <<FOREMAN_INI
[foreman]
url = http://${FOREMAN_HOST}:${FOREMAN_PORT}
user = ansible-tester
password = secure
ssl_verify = False
group_prefix = Ľuboš_
FOREMAN_INI
# use ansible to validate the return data
if ! ansible-playbook -i foreman.sh test_foreman_inventory.yml --connection=local; then
echo "foreman inventory script non-ascii failed"
exit 1
fi

View file

@ -1,7 +0,0 @@
- name: check the foreman inventory script result size and attributes
hosts: localhost
gather_facts: False
tasks:
- assert:
that:
- "{{ groups['all']|length > 900 }}"