From 761171b532b84c31a4e4953b542e80d75b0cb47c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Emmanuel=20Beno=C3=AEt?=
Date: Tue, 15 Mar 2022 12:22:43 +0100
Subject: [PATCH] Proxmox inventory filters (#4352)

* Proxmox inventory plugin - Initial implementation of filters

  * This is an attempt at implementing something that would satisfy issue #3553.

  * A rather massive code rewrite was needed: adding the host to the inventory,
    setting its variables and adding it to various groups used to be done as
    soon as the information became available. This is not possible when whether
    the host should be added to the inventory is only known once all data has
    been gathered.

  * The code for both LXC containers and Qemu VMs was refactored into a single
    loop.

  * Helper functions to generate group and fact names were added.

* Proxmox inventory plugin - Warnings for filter errors

  * When an error occurs while compositing a filter's value and strict mode is
    disabled, display a warning.

* Proxmox inventory plugin - Fixed pool groups building

  * Hosts that were excluded by the host filters were still being added to
    pool groups, causing errors.

* Proxmox inventory plugin - Refactoring

  * Split off the VM/container handling code from the `_populate()` method.

  * Split off pool group attribution from the `_populate()` method.

* Proxmox inventory filters - Changelog fragment

* proxmox inventory - Simplify _can_add_host() method

Co-authored-by: Felix Fontein
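In plain terms, the feature keeps a host only if every configured filter expression
evaluates to a truthy value against the facts gathered for that host. A minimal
standalone sketch of those semantics (illustration only: it uses raw jinja2 and
made-up facts, while the plugin itself evaluates filters through the inherited
Constructable._compose() machinery):

    # Sketch of the client-side filter semantics; assumes jinja2 is available.
    from jinja2 import Environment

    def keep_host(host_facts, filters):
        """Keep a host only if every filter expression renders truthy."""
        env = Environment()
        for expression in filters:
            # undefined names render as None, which is falsy
            if not env.compile_expression(expression)(**host_facts):
                return False
        return True

    facts = {'proxmox_status': 'running', 'proxmox_tags_parsed': ['web', 'prod']}
    print(keep_host(facts, ['proxmox_status == "running"']))       # True
    print(keep_host(facts, ['"staging" in proxmox_tags_parsed']))  # False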
---
 .../4352-proxmox-inventory-filters.yml        |   4 +
 plugins/inventory/proxmox.py                  | 262 ++++++++++--------
 tests/unit/plugins/inventory/test_proxmox.py  |   8 +-
 3 files changed, 150 insertions(+), 124 deletions(-)
 create mode 100644 changelogs/fragments/4352-proxmox-inventory-filters.yml

diff --git a/changelogs/fragments/4352-proxmox-inventory-filters.yml b/changelogs/fragments/4352-proxmox-inventory-filters.yml
new file mode 100644
index 0000000000..713e8702d3
--- /dev/null
+++ b/changelogs/fragments/4352-proxmox-inventory-filters.yml
@@ -0,0 +1,4 @@
+---
+minor_changes:
+  - proxmox inventory plugin - add support for client-side jinja filters
+    (https://github.com/ansible-collections/community.general/issues/3553).

diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
index f18a058382..67fe2b3a6c 100644
--- a/plugins/inventory/proxmox.py
+++ b/plugins/inventory/proxmox.py
@@ -77,6 +77,12 @@ DOCUMENTATION = '''
       - When set to C(true) (default), will use the first available interface. This can be different from what you expect.
       default: true
       type: bool
+    filters:
+      version_added: 4.6.0
+      description: A list of Jinja templates that allow filtering hosts.
+      type: list
+      elements: str
+      default: []
     strict:
       version_added: 2.5.0
     compose:
@@ -132,13 +138,16 @@ compose:
     "my_var_2_value"
 '''

+import itertools
 import re

 from ansible.module_utils.common._collections_compat import MutableMapping

 from ansible.errors import AnsibleError
 from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.text.converters import to_native
 from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.utils.display import Display

 from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

@@ -151,6 +160,8 @@ try:
 except ImportError:
     HAS_REQUESTS = False

+display = Display()
+

 class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
     ''' Host inventory parser for ansible using Proxmox as source. '''
@@ -291,28 +302,19 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

         return result

-    def _get_vm_config(self, node, vmid, vmtype, name):
+    def _get_vm_config(self, properties, node, vmid, vmtype, name):
         ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))

-        node_key = 'node'
-        node_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), node_key.lower()))
-        self.inventory.set_variable(name, node_key, node)
-
-        vmid_key = 'vmid'
-        vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
-        self.inventory.set_variable(name, vmid_key, vmid)
-
-        vmtype_key = 'vmtype'
-        vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
-        self.inventory.set_variable(name, vmtype_key, vmtype)
+        properties[self._fact('node')] = node
+        properties[self._fact('vmid')] = vmid
+        properties[self._fact('vmtype')] = vmtype

         plaintext_configs = [
             'tags',
         ]

         for config in ret:
-            key = config
-            key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
+            key = self._fact(config)
             value = ret[config]
             try:
                 # fixup disk images as they have no key
@@ -322,16 +324,15 @@
                 # Additional field containing parsed tags as list
                 if config == 'tags':
                     parsed_key = self.to_safe('%s%s' % (key, "_parsed"))
-                    parsed_value = [tag.strip() for tag in value.split(",")]
-                    self.inventory.set_variable(name, parsed_key, parsed_value)
+                    properties[parsed_key] = [tag.strip() for tag in value.split(",")]

                 # The first field in the agent string tells you whether the agent is enabled
                 # the rest of the comma separated string is extra config for the agent
                 if config == 'agent' and int(value.split(',')[0]):
-                    agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
                     agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
                     if agent_iface_value:
-                        self.inventory.set_variable(name, agent_iface_key, agent_iface_value)
+                        agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
+                        properties[agent_iface_key] = agent_iface_value

                 if not (isinstance(value, int) or ',' not in value):
                     # split off strings with commas to a dict
@@ -341,26 +342,18 @@
                     except Exception:
                         continue

-                self.inventory.set_variable(name, key, value)
+                properties[key] = value
             except NameError:
                 return None

-    def _get_vm_status(self, node, vmid, vmtype, name):
+    def _get_vm_status(self, properties, node, vmid, vmtype, name):
         ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
+        properties[self._fact('status')] = ret['status']
-
-        status = ret['status']
-        status_key = 'status'
-        status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower()))
-        self.inventory.set_variable(name, status_key, status)

-    def _get_vm_snapshots(self, node, vmid, vmtype, name):
+    def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
         ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
-
-        snapshots_key = 'snapshots'
-        snapshots_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), snapshots_key.lower()))
         snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
-        self.inventory.set_variable(name, snapshots_key, snapshots)
+        properties[self._fact('snapshots')] = snapshots
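The `# split off strings with commas to a dict` branch in `_get_vm_config` above is
mostly elided by the hunk. For context, here is a rough re-creation of the value
fixups it refers to; the disk-image condition and the fallback behaviour are
approximations for illustration, not lines quoted from this patch:

    # Approximate re-creation of the value fixups in _get_vm_config.
    def parse_config_value(config, value):
        # disk images have no key for their first field, so give them one
        # (the exact set of disk-like prefixes is an assumption here)
        if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
            value = 'disk_image=' + value
        # split comma-separated "k=v" strings into a dict; keep the raw
        # string if it does not split cleanly
        if not (isinstance(value, int) or ',' not in value):
            try:
                value = dict(part.split('=') for part in value.split(','))
            except Exception:
                pass
        return value

    print(parse_config_value('ide2', 'local:iso/debian.iso,media=cdrom,size=378K'))
    # {'disk_image': 'local:iso/debian.iso', 'media': 'cdrom', 'size': '378K'}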

     def to_safe(self, word):
         '''Converts 'bad' characters in a string to underscores
         so they can be used as Ansible groups
@@ -370,109 +363,130 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
         regex = r"[^A-Za-z0-9\_]"
         return re.sub(regex, "_", word.replace(" ", ""))

-    def _apply_constructable(self, name, variables):
-        strict = self.get_option('strict')
-        self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
-        self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
-        self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
+    def _fact(self, name):
+        '''Generate a fact's full name from the common prefix and a name.'''
+        return self.to_safe('%s%s' % (self.facts_prefix, name.lower()))
+
+    def _group(self, name):
+        '''Generate a group's full name from the common prefix and a name.'''
+        return self.to_safe('%s%s' % (self.group_prefix, name.lower()))
+
+    def _can_add_host(self, name, properties):
+        '''Ensure that a host satisfies all defined host filters. If strict mode is
+        enabled, any error during host filter compositing will lead to an AnsibleError
+        being raised; otherwise the filter will be ignored.
+        '''
+        for host_filter in self.host_filters:
+            try:
+                if not self._compose(host_filter, properties):
+                    return False
+            except Exception as e:  # pylint: disable=broad-except
+                message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e))
+                if self.strict:
+                    raise AnsibleError(message)
+                display.warning(message)
+        return True
+
+    def _add_host(self, name, variables):
+        self.inventory.add_host(name)
+        for k, v in variables.items():
+            self.inventory.set_variable(name, k, v)
+        variables = self.inventory.get_host(name).get_vars()
+        self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
+        self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
+        self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
+
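With the documented default of `proxmox_` for both `group_prefix` and
`facts_prefix`, the `_fact()`/`_group()` helpers above produce names like the
following. A tiny standalone re-creation (example inputs assumed):

    # Standalone re-creation of the _fact()/_group() naming, default prefixes.
    import re

    def to_safe(word):
        return re.sub(r"[^A-Za-z0-9\_]", "_", word.replace(" ", ""))

    facts_prefix = group_prefix = 'proxmox_'
    print(to_safe('%s%s' % (facts_prefix, 'VMID'.lower())))          # proxmox_vmid
    print(to_safe('%s%s' % (group_prefix, 'pool_dev-ops'.lower())))  # proxmox_pool_dev_ops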
+    def _handle_item(self, node, ittype, item):
+        '''Handle an item from the list of LXC containers and Qemu VMs. The
+        return value will be either None if the item was skipped or the name of
+        the item if it was added to the inventory.'''
+        if item.get('template'):
+            return None
+
+        properties = dict()
+        name, vmid = item['name'], item['vmid']
+
+        # get status, config and snapshots if want_facts == True
+        if self.get_option('want_facts'):
+            self._get_vm_status(properties, node, vmid, ittype, name)
+            self._get_vm_config(properties, node, vmid, ittype, name)
+            self._get_vm_snapshots(properties, node, vmid, ittype, name)
+
+        # ensure the host satisfies filters
+        if not self._can_add_host(name, properties):
+            return None
+
+        # add the host to the inventory
+        self._add_host(name, properties)
+        node_type_group = self._group('%s_%s' % (node, ittype))
+        self.inventory.add_child(self._group('all_' + ittype), name)
+        self.inventory.add_child(node_type_group, name)
+        if item['status'] == 'stopped':
+            self.inventory.add_child(self._group('all_stopped'), name)
+        elif item['status'] == 'running':
+            self.inventory.add_child(self._group('all_running'), name)
+
+        return name
+
+    def _populate_pool_groups(self, added_hosts):
+        '''Generate groups from Proxmox resource pools, ignoring VMs and
+        containers that were skipped.'''
+        for pool in self._get_pools():
+            poolid = pool.get('poolid')
+            if not poolid:
+                continue
+            pool_group = self._group('pool_' + poolid)
+            self.inventory.add_group(pool_group)
+
+            for member in self._get_members_per_pool(poolid):
+                name = member.get('name')
+                if name and name in added_hosts:
+                    self.inventory.add_child(pool_group, name)

     def _populate(self):

-        self._get_auth()
+        # create common groups
+        self.inventory.add_group(self._group('all_lxc'))
+        self.inventory.add_group(self._group('all_qemu'))
+        self.inventory.add_group(self._group('all_running'))
+        self.inventory.add_group(self._group('all_stopped'))
+        nodes_group = self._group('nodes')
+        self.inventory.add_group(nodes_group)

         # gather vm's on nodes
+        self._get_auth()
+        hosts = []
         for node in self._get_nodes():
-            # FIXME: this can probably be cleaner
-            # create groups
-            lxc_group = 'all_lxc'
-            lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), lxc_group.lower()))
-            self.inventory.add_group(lxc_group)
-            qemu_group = 'all_qemu'
-            qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), qemu_group.lower()))
-            self.inventory.add_group(qemu_group)
-            nodes_group = 'nodes'
-            nodes_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), nodes_group.lower()))
-            self.inventory.add_group(nodes_group)
-            running_group = 'all_running'
-            running_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), running_group.lower()))
-            self.inventory.add_group(running_group)
-            stopped_group = 'all_stopped'
-            stopped_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), stopped_group.lower()))
-            self.inventory.add_group(stopped_group)
+            if not node.get('node'):
+                continue

-            if node.get('node'):
-                self.inventory.add_host(node['node'])
+            self.inventory.add_host(node['node'])
+            if node['type'] == 'node':
+                self.inventory.add_child(nodes_group, node['node'])

-                if node['type'] == 'node':
-                    self.inventory.add_child(nodes_group, node['node'])
+            if node['status'] == 'offline':
+                continue

-                if node['status'] == 'offline':
-                    continue
+            # get node IP address
+            if self.get_option("want_proxmox_nodes_ansible_host"):
+                ip = self._get_node_ip(node['node'])
+                self.inventory.set_variable(node['node'], 'ansible_host', ip)

-                # get node IP address
-                if self.get_option("want_proxmox_nodes_ansible_host"):
-                    ip = self._get_node_ip(node['node'])
-                    self.inventory.set_variable(node['node'], 'ansible_host', ip)

+            # add LXC/Qemu groups for the node
+            for ittype in ('lxc', 'qemu'):
+                node_type_group = self._group('%s_%s' % (node['node'], ittype))
+                self.inventory.add_group(node_type_group)

-            # get LXC containers for this node
-            node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower()))
-            self.inventory.add_group(node_lxc_group)
-            for lxc in self._get_lxc_per_node(node['node']):
-                self.inventory.add_host(lxc['name'])
-                self.inventory.add_child(lxc_group, lxc['name'])
-                self.inventory.add_child(node_lxc_group, lxc['name'])
-
-                # get LXC status when want_facts == True
-                if self.get_option('want_facts'):
-                    self._get_vm_status(node['node'], lxc['vmid'], 'lxc', lxc['name'])
-                if lxc['status'] == 'stopped':
-                    self.inventory.add_child(stopped_group, lxc['name'])
-                elif lxc['status'] == 'running':
-                    self.inventory.add_child(running_group, lxc['name'])
-
-                # get LXC config and snapshots for facts
-                if self.get_option('want_facts'):
-                    self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])
-                    self._get_vm_snapshots(node['node'], lxc['vmid'], 'lxc', lxc['name'])
-
-                self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars())
-
-            # get QEMU vm's for this node
-            node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
-            self.inventory.add_group(node_qemu_group)
-            for qemu in self._get_qemu_per_node(node['node']):
-                if qemu.get('template'):
-                    continue
-
-                self.inventory.add_host(qemu['name'])
-                self.inventory.add_child(qemu_group, qemu['name'])
-                self.inventory.add_child(node_qemu_group, qemu['name'])
-
-                # get QEMU status
-                self._get_vm_status(node['node'], qemu['vmid'], 'qemu', qemu['name'])
-                if qemu['status'] == 'stopped':
-                    self.inventory.add_child(stopped_group, qemu['name'])
-                elif qemu['status'] == 'running':
-                    self.inventory.add_child(running_group, qemu['name'])
-
-                # get QEMU config and snapshots for facts
-                if self.get_option('want_facts'):
-                    self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])
-                    self._get_vm_snapshots(node['node'], qemu['vmid'], 'qemu', qemu['name'])
-
-                self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars())
+            # get LXC containers and Qemu VMs for this node
+            lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
+            qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
+            for ittype, item in itertools.chain(lxc_objects, qemu_objects):
+                name = self._handle_item(node['node'], ittype, item)
+                if name is not None:
+                    hosts.append(name)
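The `zip(itertools.repeat(...), ...)` plus `itertools.chain(...)` pattern above is
what lets one loop replace the two near-identical LXC and QEMU blocks. In
isolation, with made-up item lists:

    # The merge pattern from _populate(), demonstrated with made-up items.
    import itertools

    lxc_objects = zip(itertools.repeat('lxc'), [{'name': 'ct1'}, {'name': 'ct2'}])
    qemu_objects = zip(itertools.repeat('qemu'), [{'name': 'vm1'}])
    for ittype, item in itertools.chain(lxc_objects, qemu_objects):
        print(ittype, item['name'])  # lxc ct1, then lxc ct2, then qemu vm1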

         # gather vm's in pools
-        for pool in self._get_pools():
-            if pool.get('poolid'):
-                pool_group = 'pool_' + pool['poolid']
-                pool_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), pool_group.lower()))
-                self.inventory.add_group(pool_group)
-
-                for member in self._get_members_per_pool(pool['poolid']):
-                    if member.get('name'):
-                        if not member.get('template'):
-                            self.inventory.add_child(pool_group, member['name'])
+        self._populate_pool_groups(hosts)

     def parse(self, inventory, loader, path, cache=True):
         if not HAS_REQUESTS:
@@ -484,12 +498,16 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
         # read config from file, this sets 'options'
         self._read_config_data(path)

-        # get connection host
+        # read options
         self.proxmox_url = self.get_option('url').rstrip('/')
         self.proxmox_user = self.get_option('user')
         self.proxmox_password = self.get_option('password')
         self.cache_key = self.get_cache_key(path)
         self.use_cache = cache and self.get_option('cache')
+        self.host_filters = self.get_option('filters')
+        self.group_prefix = self.get_option('group_prefix')
+        self.facts_prefix = self.get_option('facts_prefix')
+        self.strict = self.get_option('strict')

         # actually populate inventory
         self._populate()
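The test changes below thread the new `properties` argument through the existing
mocks and stub out `_can_add_host` entirely, so the filter logic itself is not
exercised by `test_populate`. A dedicated test is not part of this patch, but
could look roughly like this (a sketch using pytest-mock, as the existing tests
do; the filter expression and host names are made up):

    # Sketch of a focused test for the new filter logic (not part of this patch).
    from ansible_collections.community.general.plugins.inventory.proxmox import InventoryModule

    def test_can_add_host_filters(mocker):
        inventory = InventoryModule()
        inventory.strict = False
        inventory.host_filters = ['proxmox_vmtype == "lxc"']
        # stand-in for Constructable._compose: evaluate the filter against properties
        mocker.patch.object(inventory, '_compose',
                            side_effect=lambda expr, props: props.get('proxmox_vmtype') == 'lxc')
        assert inventory._can_add_host('ct1', {'proxmox_vmtype': 'lxc'})
        assert not inventory._can_add_host('vm1', {'proxmox_vmtype': 'qemu'})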

diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py
index 12927551f8..ae29ab0a35 100644
--- a/tests/unit/plugins/inventory/test_proxmox.py
+++ b/tests/unit/plugins/inventory/test_proxmox.py
@@ -522,7 +522,7 @@ def get_json(url):
     }


-def get_vm_snapshots(node, vmtype, vmid, name):
+def get_vm_snapshots(node, properties, vmtype, vmid, name):
     return [
         {"description": "",
          "name": "clean",
@@ -537,7 +537,7 @@
         }]


-def get_vm_status(node, vmtype, vmid, name):
+def get_vm_status(properties, node, vmtype, vmid, name):
     return True


@@ -559,6 +559,9 @@ def test_populate(inventory, mocker):
     inventory.proxmox_user = 'root@pam'
     inventory.proxmox_password = 'password'
     inventory.proxmox_url = 'https://localhost:8006'
+    inventory.group_prefix = 'proxmox_'
+    inventory.facts_prefix = 'proxmox_'
+    inventory.strict = False

     # bypass authentication and API fetch calls
     inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
@@ -566,6 +569,7 @@ def test_populate(inventory, mocker):
     inventory._get_vm_status = mocker.MagicMock(side_effect=get_vm_status)
     inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots)
     inventory.get_option = mocker.MagicMock(side_effect=get_option)
+    inventory._can_add_host = mocker.MagicMock(return_value=True)

     inventory._populate()

     # get different hosts