2016-05-31 02:41:48 +02:00
|
|
|
#!/usr/bin/env python
|
2017-08-18 21:23:45 +02:00
|
|
|
#
|
|
|
|
# This file is part of Ansible,
|
|
|
|
#
|
|
|
|
# Ansible is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# Ansible is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
# Requirements
|
|
|
|
# - pyvmomi >= 6.0.0.2016.4
|
|
|
|
|
|
|
|
# TODO:
|
|
|
|
# * more jq examples
|
2016-12-11 03:50:09 +01:00
|
|
|
# * optional folder hierarchy
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
"""
|
|
|
|
$ jq '._meta.hostvars[].config' data.json | head
|
|
|
|
{
|
|
|
|
"alternateguestname": "",
|
|
|
|
"instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
|
|
|
|
"memoryhotaddenabled": false,
|
|
|
|
"guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
|
|
|
|
"changeversion": "2016-05-16T18:43:14.977925Z",
|
|
|
|
"uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
|
|
|
|
"cpuhotremoveenabled": false,
|
|
|
|
"vpmcenabled": false,
|
|
|
|
"firmware": "bios",
|
|
|
|
"""
|
|
|
|
|
|
|
|
from __future__ import print_function
|
|
|
|
|
|
|
|
import atexit
|
|
|
|
import datetime
|
|
|
|
import getpass
|
2017-07-23 03:15:46 +02:00
|
|
|
import json
|
2016-05-31 02:41:48 +02:00
|
|
|
import os
|
2017-03-18 02:25:00 +01:00
|
|
|
import re
|
2016-05-31 02:41:48 +02:00
|
|
|
import ssl
|
|
|
|
import sys
|
|
|
|
import uuid
|
|
|
|
from collections import defaultdict
|
|
|
|
from time import time
|
2017-07-23 03:15:46 +02:00
|
|
|
|
|
|
|
import six
|
2017-03-18 02:25:00 +01:00
|
|
|
from jinja2 import Environment
|
2017-07-23 03:15:46 +02:00
|
|
|
from six import integer_types, string_types
|
|
|
|
from six.moves import configparser
|
2016-05-31 02:41:48 +02:00
|
|
|
|
2017-07-23 03:15:46 +02:00
|
|
|
try:
|
|
|
|
import argparse
|
|
|
|
except ImportError:
|
|
|
|
sys.exit('Error: This inventory script required "argparse" python module. Please install it or upgrade to python-2.7')
|
2017-07-18 13:20:23 +02:00
|
|
|
|
2016-05-31 02:41:48 +02:00
|
|
|
try:
|
2017-07-18 13:20:23 +02:00
|
|
|
from pyVmomi import vim, vmodl
|
2016-05-31 02:41:48 +02:00
|
|
|
from pyVim.connect import SmartConnect, Disconnect
|
|
|
|
except ImportError:
|
2017-07-18 13:20:23 +02:00
|
|
|
sys.exit("ERROR: This inventory script required 'pyVmomi' Python module, it was not able to load it")
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
hasvcr = False
|
|
|
|
try:
|
|
|
|
import vcr
|
2017-01-10 15:09:11 +01:00
|
|
|
|
2016-05-31 02:41:48 +02:00
|
|
|
hasvcr = True
|
|
|
|
except ImportError:
|
|
|
|
pass
|
|
|
|
|
2017-01-10 15:09:11 +01:00
|
|
|
|
2017-03-18 02:25:00 +01:00
|
|
|
def regex_match(s, pattern):
    '''Custom jinja2 filter: return True if the regex *pattern* matches
    at the beginning of string *s* (re.match semantics), else False.

    The original compiled the pattern and branched on the match object;
    `re.match` already caches compiled patterns, so a direct truth test
    is equivalent and simpler.
    '''
    return re.match(pattern, s) is not None
|
|
|
|
|
|
|
|
|
2016-10-18 22:21:37 +02:00
|
|
|
class VMwareMissingHostException(Exception):
    '''Raised by get_host_info() when the requested host is not present
    in the collected inventory (neither by uuid key nor by VM name).'''
    pass
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
|
2017-01-10 15:09:11 +01:00
|
|
|
class VMWareInventory(object):
    '''Dynamic inventory source that serializes VMware virtual machines
    (via pyVmomi) into an Ansible-compatible JSON inventory.'''

    __name__ = 'VMWareInventory'

    # Becomes a list of property paths when the ini file has a
    # [properties] section; False means "serialize everything".
    guest_props = False
    instances = []
    debug = False
    load_dumpfile = None
    write_dumpfile = None
    # Maximum recursion depth used by _process_object_types.
    maxlevel = 1
    # Lowercase all fact keys (overridden by ini 'lower_var_keys').
    lowerkeys = True
    config = None
    cache_max_age = None
    cache_path_cache = None
    cache_path_index = None
    cache_dir = None
    server = None
    port = None
    username = None
    password = None
    validate_certs = True
    # Jinja2 boolean expressions used to drop hosts from the inventory.
    host_filters = []
    # Attribute names (lowercased) that are never serialized.
    skip_keys = []
    # Jinja2 patterns used to build inventory groups.
    groupby_patterns = []

    # Types that can be emitted into JSON as-is.
    safe_types = [bool, str, float, None] + list(integer_types)
    iter_types = [dict, list]

    # Attribute names that are expensive or noisy to traverse.
    bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']

    # Per-vim-type recursion ceilings for _process_object_types.
    vimTableMaxDepth = {
        "vim.HostSystem": 2,
        "vim.VirtualMachine": 2,
    }

    # key -> name map of vCenter custom fields, filled by _get_instances.
    custom_fields = {}

    # use jinja environments to allow for custom filters
    env = Environment()
    env.filters['regex_match'] = regex_match

    # translation table for attributes to fetch for known vim types
    vimTable = {
        vim.Datastore: ['_moId', 'name'],
        vim.ResourcePool: ['_moId', 'name'],
        vim.HostSystem: ['_moId', 'name'],
    }

    @staticmethod
    def _empty_inventory():
        # Minimal skeleton Ansible expects from a dynamic inventory script.
        return {"_meta": {"hostvars": {}}}
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
def __init__(self, load=True):
    '''Build the inventory.

    load -- when True (the default), parse CLI args + ini settings and
            populate self.inventory either from the on-disk cache or by
            querying the VMware API. When False, only an empty inventory
            skeleton is created (useful for testing).
    '''
    self.inventory = VMWareInventory._empty_inventory()

    if load:
        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.read_settings()

        # Check the cache
        cache_valid = self.is_cache_valid()

        # Handle Cache: refresh on --refresh-cache or stale/missing cache
        if self.args.refresh_cache or not cache_valid:
            self.do_api_calls_update_cache()
        else:
            self.debugl('loading inventory from cache')
            self.inventory = self.get_inventory_from_cache()
|
|
|
|
|
|
|
|
def debugl(self, text):
    '''Emit a timestamped debug line to stdout, but only when --debug
    was given on the command line. Non-ascii-encodable input is
    degraded to an ascii-only representation rather than crashing.'''
    if not self.args.debug:
        return
    try:
        text = str(text)
    except UnicodeEncodeError:
        text = text.encode('ascii', 'ignore')
    print('%s %s' % (datetime.datetime.now(), text))
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
def show(self):
    '''Render the requested inventory data as a pretty-printed JSON string.

    --host takes precedence and yields a single host's vars; otherwise
    --list (default True) yields the full inventory.
    '''
    # Data to print
    self.debugl('dumping results')
    payload = None
    if self.args.host:
        payload = self.get_host_info(self.args.host)
    elif self.args.list:
        # Display list of instances for inventory
        payload = self.inventory
    return json.dumps(payload, indent=2)
|
|
|
|
|
|
|
|
def is_cache_valid(self):
    ''' Determines if the cache files have expired, or if it is still valid '''
    # Missing cache file means no valid cache.
    if not os.path.isfile(self.cache_path_cache):
        return False
    # Cache is valid while its mtime + max age is still in the future.
    mod_time = os.path.getmtime(self.cache_path_cache)
    return (mod_time + self.cache_max_age) > time()
|
|
|
|
|
|
|
|
def do_api_calls_update_cache(self):

    ''' Get instances and cache the data '''

    # Query the VMware API, convert the results to inventory form,
    # then persist the inventory so later runs can skip the API.
    self.inventory = self.instances_to_inventory(self.get_instances())
    self.write_to_cache(self.inventory)
|
2016-05-31 02:41:48 +02:00
|
|
|
|
2017-01-12 15:37:18 +01:00
|
|
|
def write_to_cache(self, data):
    ''' Dump inventory to json file '''
    # Open in text mode: json.dumps() returns str, and writing str to a
    # file opened 'wb' raises TypeError on Python 3 (this file supports
    # py3 via six). Text mode works on both py2 and py3.
    with open(self.cache_path_cache, 'w') as f:
        f.write(json.dumps(data))
|
|
|
|
|
|
|
|
def get_inventory_from_cache(self):
    ''' Read in jsonified inventory '''
    # Binary read is safe here: json.loads accepts bytes on modern py3
    # and str on py2.
    with open(self.cache_path_cache, 'rb') as f:
        return json.loads(f.read())
|
|
|
|
|
|
|
|
def read_settings(self):
    ''' Reads the settings from the vmware_inventory.ini file '''

    # Derive the default ini filename from this script's own name
    # (e.g. vmware_inventory.py -> vmware_inventory.ini).
    scriptbasename = __file__
    scriptbasename = os.path.basename(scriptbasename)
    scriptbasename = scriptbasename.replace('.py', '')

    # Fallback values used for any option missing from the ini file.
    defaults = {'vmware': {
        'server': '',
        'port': 443,
        'username': '',
        'password': '',
        'validate_certs': True,
        'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
        'cache_name': 'ansible-vmware',
        'cache_path': '~/.ansible/tmp',
        'cache_max_age': 3600,
        'max_object_level': 1,
        # Implicit string concatenation: one comma-separated string.
        'skip_keys': 'declaredalarmstate,'
                     'disabledmethod,'
                     'dynamicproperty,'
                     'dynamictype,'
                     'environmentbrowser,'
                     'managedby,'
                     'parent,'
                     'childtype,'
                     'resourceconfig',
        'alias_pattern': '{{ config.name + "_" + config.uuid }}',
        'host_pattern': '{{ guest.ipaddress }}',
        'host_filters': '{{ guest.gueststate == "running" }}',
        'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
        'lower_var_keys': True,
        'custom_field_group_prefix': 'vmware_tag_',
        'groupby_custom_field': False}
    }

    # SafeConfigParser was renamed/deprecated; pick per interpreter.
    if six.PY3:
        config = configparser.ConfigParser()
    else:
        config = configparser.SafeConfigParser()

    # where is the config?
    vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
    vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
    config.read(vmware_ini_path)

    # apply defaults for any option the ini file did not set
    for k, v in defaults['vmware'].items():
        if not config.has_option('vmware', k):
            config.set('vmware', k, str(v))

    # where is the cache?
    self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
    if self.cache_dir and not os.path.exists(self.cache_dir):
        os.makedirs(self.cache_dir)

    # set the cache filename and max age
    cache_name = config.get('vmware', 'cache_name')
    self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
    self.debugl('cache path is %s' % self.cache_path_cache)
    self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))

    # mark the connection info; environment variables win over the ini
    self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
    self.debugl('server is %s' % self.server)
    self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
    self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
    self.debugl('username is %s' % self.username)
    self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password'))
    self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
    # Coerce common falsy spellings to a real boolean. NOTE: any other
    # value (including 'True' as a string) is left as-is.
    if self.validate_certs in ['no', 'false', 'False', False]:
        self.validate_certs = False

    self.debugl('cert validation is %s' % self.validate_certs)

    # behavior control
    self.maxlevel = int(config.get('vmware', 'max_object_level'))
    self.debugl('max object level is %s' % self.maxlevel)
    self.lowerkeys = config.get('vmware', 'lower_var_keys')
    # config.get returns a string; normalize to a boolean.
    if type(self.lowerkeys) != bool:
        if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
            self.lowerkeys = True
        else:
            self.lowerkeys = False
    self.debugl('lower keys is %s' % self.lowerkeys)
    self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
    self.debugl('skip keys is %s' % self.skip_keys)
    self.host_filters = list(config.get('vmware', 'host_filters').split(','))
    self.debugl('host filters are %s' % self.host_filters)
    self.groupby_patterns = list(config.get('vmware', 'groupby_patterns').split(','))
    self.debugl('groupby patterns are %s' % self.groupby_patterns)

    # Special feature to disable the brute force serialization of the
    # virtulmachine objects. The key name for these properties does not
    # matter because the values are just items for a larger list.
    if config.has_section('properties'):
        self.guest_props = []
        for prop in config.items('properties'):
            self.guest_props.append(prop[1])

    # save the config
    self.config = config
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
def parse_cli_args(self):
    ''' Command line argument processing '''
    argparser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
    argparser.add_argument('--debug', action='store_true', default=False, help='show debug info')
    argparser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
    argparser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
    argparser.add_argument('--refresh-cache', action='store_true', default=False,
                           help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)')
    argparser.add_argument('--max-instances', default=None, type=int,
                           help='maximum number of instances to retrieve')
    self.args = argparser.parse_args()
|
|
|
|
|
|
|
|
def get_instances(self):
    ''' Get a list of vm instances with pyvmomi '''
    connect_kwargs = dict(
        host=self.server,
        user=self.username,
        pwd=self.password,
        port=int(self.port),
    )

    # Turn off certificate verification only when the runtime supports
    # SSLContext and the user explicitly disabled validation.
    if hasattr(ssl, 'SSLContext') and not self.validate_certs:
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_NONE
        connect_kwargs['sslContext'] = context

    return self._get_instances(connect_kwargs)
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
def _get_instances(self, inkwargs):

    ''' Make API calls '''

    instances = []
    try:
        si = SmartConnect(**inkwargs)
    except ssl.SSLError as connection_error:
        # Give an actionable hint when the failure is a cert problem
        # and validation was requested.
        if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs:
            sys.exit("Unable to connect to ESXi server due to %s, "
                     "please specify validate_certs=False and try again" % connection_error)

    except Exception as exc:
        self.debugl("Unable to connect to ESXi server due to %s" % exc)
        sys.exit("Unable to connect to ESXi server due to %s" % exc)

    self.debugl('retrieving all instances')
    if not si:
        sys.exit("Could not connect to the specified host using specified "
                 "username and password")
    # Ensure the session is closed on interpreter exit.
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()

    # Create a search container for virtualmachines
    self.debugl('creating containerview for virtualmachines')
    container = content.rootFolder
    viewType = [vim.VirtualMachine]
    recursive = True
    containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
    children = containerView.view
    for child in children:
        # If requested, limit the total number of instances
        if self.args.max_instances:
            if len(instances) >= self.args.max_instances:
                break
        instances.append(child)
    self.debugl("%s total instances in container view" % len(instances))

    # --host narrows collection to a single VM by name.
    if self.args.host:
        instances = [x for x in instances if x.name == self.args.host]

    instance_tuples = []
    for instance in sorted(instances):
        # Use the targeted property list when the ini has [properties];
        # otherwise brute-force serialize the whole object tree.
        if self.guest_props:
            ifacts = self.facts_from_proplist(instance)
        else:
            ifacts = self.facts_from_vobj(instance)
        instance_tuples.append((instance, ifacts))
    self.debugl('facts collected for all instances')

    # Collect the key->name map of VM custom fields (used later for
    # groupby_custom_field); best-effort, permission faults are logged.
    try:
        cfm = content.customFieldsManager
        if cfm is not None and cfm.field:
            for f in cfm.field:
                if f.managedObjectType == vim.VirtualMachine:
                    self.custom_fields[f.key] = f.name
            self.debugl('%d custom fields collected' % len(self.custom_fields))
    except vmodl.RuntimeFault as exc:
        self.debugl("Unable to gather custom fields due to %s" % exc.msg)

    return instance_tuples
|
|
|
|
|
|
|
|
def instances_to_inventory(self, instances):

    ''' Convert a list of vm objects into a json compliant inventory '''

    self.debugl('re-indexing instances based on ini settings')
    inventory = VMWareInventory._empty_inventory()
    inventory['all'] = {}
    inventory['all']['hosts'] = []
    for idx, instance in enumerate(instances):
        # make a unique id for this object to avoid vmware's
        # numerous uuid's which aren't all unique.
        thisid = str(uuid.uuid4())
        idata = instance[1]

        # Put it in the inventory
        inventory['all']['hosts'].append(thisid)
        inventory['_meta']['hostvars'][thisid] = idata.copy()
        inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid

    # Make a map of the uuid to the alias the user wants
    name_mapping = self.create_template_mapping(
        inventory,
        self.config.get('vmware', 'alias_pattern')
    )

    # Make a map of the uuid to the ssh hostname the user wants
    host_mapping = self.create_template_mapping(
        inventory,
        self.config.get('vmware', 'host_pattern')
    )

    # Reset the inventory keys: rename each random-uuid key to the
    # user's alias, and attach the connection address.
    for k, v in name_mapping.items():

        # Skip hosts for which the host_pattern rendered nothing.
        if not host_mapping or k not in host_mapping:
            continue

        # set ansible_host (2.x)
        try:
            inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
            # 1.9.x backwards compliance
            inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
        except Exception:
            continue

        if k == v:
            continue

        # add new key
        inventory['all']['hosts'].append(v)
        inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k]

        # cleanup old key
        inventory['all']['hosts'].remove(k)
        inventory['_meta']['hostvars'].pop(k, None)

    self.debugl('pre-filtered hosts:')
    for i in inventory['all']['hosts']:
        self.debugl(' * %s' % i)
    # Apply host filters: each filter is a jinja2 boolean expression;
    # hosts rendering falsy are removed from the inventory.
    for hf in self.host_filters:
        if not hf:
            continue
        self.debugl('filter: %s' % hf)
        filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
        for k, v in filter_map.items():
            if not v:
                # delete this host
                inventory['all']['hosts'].remove(k)
                inventory['_meta']['hostvars'].pop(k, None)

    self.debugl('post-filter hosts:')
    for i in inventory['all']['hosts']:
        self.debugl(' * %s' % i)

    # Create groups from each groupby pattern; the rendered string
    # becomes the group name.
    for gbp in self.groupby_patterns:
        groupby_map = self.create_template_mapping(inventory, gbp)
        for k, v in groupby_map.items():
            if v not in inventory:
                inventory[v] = {}
                inventory[v]['hosts'] = []
            if k not in inventory[v]['hosts']:
                inventory[v]['hosts'].append(k)

    # Optionally build extra groups from VM custom field values
    # (comma-separated values each become one tag group).
    if self.config.get('vmware', 'groupby_custom_field'):
        for k, v in inventory['_meta']['hostvars'].items():
            if 'customvalue' in v:
                for tv in v['customvalue']:
                    if not isinstance(tv['value'], string_types):
                        continue

                    newkey = None
                    # Resolve the numeric field key to its display name
                    # when known.
                    field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key']
                    values = []
                    keylist = map(lambda x: x.strip(), tv['value'].split(','))
                    for kl in keylist:
                        try:
                            newkey = self.config.get('vmware', 'custom_field_group_prefix') + field_name + '_' + kl
                            newkey = newkey.strip()
                        except Exception as e:
                            self.debugl(e)
                        values.append(newkey)
                    for tag in values:
                        if not tag:
                            continue
                        if tag not in inventory:
                            inventory[tag] = {}
                            inventory[tag]['hosts'] = []
                        if k not in inventory[tag]['hosts']:
                            inventory[tag]['hosts'].append(k)

    return inventory
|
|
|
|
|
|
|
|
def create_template_mapping(self, inventory, pattern, dtype='string'):

    ''' Return a hash of uuid to templated string from pattern '''

    mapping = {}
    for k, v in inventory['_meta']['hostvars'].items():
        # Render the jinja2 pattern against this host's vars.
        t = self.env.from_string(pattern)
        newkey = None
        try:
            newkey = t.render(v)
            newkey = newkey.strip()
        except Exception as e:
            self.debugl(e)
        # Hosts whose pattern renders empty are omitted from the map.
        if not newkey:
            continue
        elif dtype == 'integer':
            newkey = int(newkey)
        elif dtype == 'boolean':
            # Only the exact strings 'true'/'false' (any case) are
            # coerced; anything else stays a (truthy) string.
            if newkey.lower() == 'false':
                newkey = False
            elif newkey.lower() == 'true':
                newkey = True
        elif dtype == 'string':
            pass
        mapping[k] = newkey
    return mapping
|
|
|
|
|
2016-10-18 22:21:37 +02:00
|
|
|
def facts_from_proplist(self, vm):
    '''Get specific properties instead of serializing everything'''

    rdata = {}
    for prop in self.guest_props:
        self.debugl('getting %s property for %s' % (prop, vm.name))
        key = prop
        if self.lowerkeys:
            key = key.lower()

        if '.' not in prop:
            # props without periods are direct attributes of the parent
            rdata[key] = getattr(vm, prop)
        else:
            # props with periods are subkeys of parent attributes
            parts = prop.split('.')
            total = len(parts) - 1

            # pointer to the current object
            val = None
            # pointer to the current result key
            lastref = rdata

            # Walk the dotted path one component at a time, building a
            # matching nested dict structure in rdata as we go.
            for idx, x in enumerate(parts):

                # A dict val means a previous component was already
                # serialized; descend by key (case-insensitive fallback).
                if isinstance(val, dict):
                    if x in val:
                        val = val.get(x)
                    elif x.lower() in val:
                        val = val.get(x.lower())
                else:
                    # if the val wasn't set yet, get it from the parent
                    if not val:
                        try:
                            val = getattr(vm, x)
                        except AttributeError as e:
                            self.debugl(e)
                    else:
                        # in a subkey, get the subprop from the previous attrib
                        try:
                            val = getattr(val, x)
                        except AttributeError as e:
                            self.debugl(e)

                # make sure it serializes
                val = self._process_object_types(val)

                # lowercase keys if requested
                if self.lowerkeys:
                    x = x.lower()

                # change the pointer or set the final value
                if idx != total:
                    if x not in lastref:
                        lastref[x] = {}
                    lastref = lastref[x]
                else:
                    lastref[x] = val

    return rdata
|
|
|
|
|
2016-05-31 02:41:48 +02:00
|
|
|
def facts_from_vobj(self, vobj, level=0):

    ''' Traverse a VM object and return a json compliant data structure '''

    # pyvmomi objects are not yet serializable, but may be one day ...
    # https://github.com/vmware/pyvmomi/issues/21

    # WARNING:
    # Accessing an object attribute will trigger a SOAP call to the remote.
    # Increasing the attributes collected or the depth of recursion greatly
    # increases runtime duration and potentially memory+network utilization.

    if level == 0:
        try:
            self.debugl("get facts for %s" % vobj.name)
        except Exception as e:
            self.debugl(e)

    rdata = {}

    # Enumerate candidate attributes, dropping privates, known-bad
    # names, and user-configured skip keys.
    methods = dir(vobj)
    methods = [str(x) for x in methods if not x.startswith('_')]
    methods = [x for x in methods if x not in self.bad_types]
    methods = [x for x in methods if not x.lower() in self.skip_keys]
    methods = sorted(methods)

    for method in methods:
        # Attempt to get the method, skip on fail
        try:
            methodToCall = getattr(vobj, method)
        except Exception as e:
            continue

        # Skip callable methods
        if callable(methodToCall):
            continue

        if self.lowerkeys:
            method = method.lower()

        # Recursively serialize the attribute value.
        rdata[method] = self._process_object_types(
            methodToCall,
            thisvm=vobj,
            inkey=method,
        )

    return rdata
|
2016-05-31 02:41:48 +02:00
|
|
|
|
2017-08-25 15:53:01 +02:00
|
|
|
def _process_object_types(self, vobj, thisvm=None, inkey='', level=0):
    ''' Serialize an object '''
    rdata = {}

    # Hard per-type recursion ceiling (e.g. vim.VirtualMachine: 2).
    if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]:
        return rdata

    if vobj is None:
        rdata = None
    elif type(vobj) in self.vimTable:
        # Known vim types: fetch only the whitelisted attributes.
        rdata = {}
        for key in self.vimTable[type(vobj)]:
            try:
                rdata[key] = getattr(vobj, key)
            except Exception as e:
                self.debugl(e)

    elif issubclass(type(vobj), str) or isinstance(vobj, str):
        if vobj.isalnum():
            rdata = vobj
        else:
            # NOTE(review): str has no .decode on Python 3, so this
            # branch would raise TypeError there; it looks intended to
            # strip non-ascii on py2 — confirm against supported
            # interpreter versions.
            rdata = vobj.decode('ascii', 'ignore')
    elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
        rdata = vobj
    elif issubclass(type(vobj), integer_types) or isinstance(vobj, integer_types):
        rdata = vobj
    elif issubclass(type(vobj), float) or isinstance(vobj, float):
        rdata = vobj
    elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
        rdata = []
        # Sort for stable output when elements are comparable.
        try:
            vobj = sorted(vobj)
        except Exception:
            pass

        for idv, vii in enumerate(vobj):
            if level + 1 <= self.maxlevel:
                vid = self._process_object_types(
                    vii,
                    thisvm=thisvm,
                    inkey=inkey + '[' + str(idv) + ']',
                    level=(level + 1)
                )

                # Falsy results (None, {}, '') are dropped entirely.
                if vid:
                    rdata.append(vid)

    elif issubclass(type(vobj), dict):
        # Dicts are intentionally not serialized here.
        pass

    elif issubclass(type(vobj), object):
        # Generic object: walk its public, non-callable attributes,
        # honoring bad_types and dotted skip_keys (e.g. 'config.foo').
        methods = dir(vobj)
        methods = [str(x) for x in methods if not x.startswith('_')]
        methods = [x for x in methods if x not in self.bad_types]
        methods = [x for x in methods if not inkey + '.' + x.lower() in self.skip_keys]
        methods = sorted(methods)

        for method in methods:
            # Attempt to get the method, skip on fail
            try:
                methodToCall = getattr(vobj, method)
            except Exception as e:
                continue

            if callable(methodToCall):
                continue

            if self.lowerkeys:
                method = method.lower()
            if level + 1 <= self.maxlevel:
                try:
                    rdata[method] = self._process_object_types(
                        methodToCall,
                        thisvm=thisvm,
                        inkey=inkey + '.' + method,
                        level=(level + 1)
                    )
                except vim.fault.NoPermission:
                    self.debugl("Skipping method %s (NoPermission)" % method)
    else:
        pass

    return rdata
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
def get_host_info(self, host):

    ''' Return hostvars for a single host '''

    hostvars = self.inventory['_meta']['hostvars']

    # Fast path: the argument is already an inventory key (uuid/alias).
    if host in hostvars:
        return hostvars[host]

    # Fallback: scan for a VM whose 'name' fact matches --host.
    if self.args.host and hostvars:
        for k, v in hostvars.items():
            if hostvars[k]['name'] == self.args.host:
                return hostvars[k]
        raise VMwareMissingHostException('%s not found' % host)

    raise VMwareMissingHostException('%s not found' % host)
|
2016-05-31 02:41:48 +02:00
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Run the script: build the inventory (CLI args + ini + cache/API)
    # and print the resulting JSON to stdout for Ansible to consume.
    print(VMWareInventory().show())
|