[proxmox_vm_info] Add ability to retrieve config (#7485)
* feat: add ability to retrieve config

  Light refactor of the get_vms_from_nodes function. Added the ability to retrieve the configuration (current or pending) of existing machines.

* Add changelog fragment
* Add changelog fragment (newline missed)
* Update changelogs/fragments/7485-proxmox_vm_info-config.yml

  Co-authored-by: Felix Fontein <felix@fontein.de>

* Apply suggestions from code review

  Co-authored-by: Felix Fontein <felix@fontein.de>

* Replaced two bool options with one three-state option
* Module args for the three-state option
* Remove trailing newline
* Make use of dict instead of list. Fix uncalled 'get config for lxc'.
* Sanity tests
* A couple of unit tests fixed
* Unit tests fixed
* Unit tests for Python 2.7 fixed. Test for config parameter added.

---------

Co-authored-by: Felix Fontein <felix@fontein.de>
parent a599afa384
commit 68051774d8

3 changed files with 101 additions and 52 deletions
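At its core, the change is one extra Proxmox API call per matched VM. A rough standalone sketch of that mapping, assuming the proxmoxer client the module is built on (the fetch_vm_config helper and the connection details below are invented for illustration and are not part of this commit):

# Illustrative only: how the three-state "config" option maps onto
# GET /nodes/{node}/{qemu|lxc}/{vmid}/config?current=[0|1].
from proxmoxer import ProxmoxAPI


def fetch_vm_config(api, node, vm_type, vmid, config="none"):
    # "none": skip entirely; "current": running config; "pending": config with pending changes applied
    if config == "none":
        return None
    current_flag = 0 if config == "pending" else 1
    # e.g. api.nodes("node01").qemu(101).config.get(current=1)
    return getattr(api.nodes(node), vm_type)(vmid).config.get(current=current_flag)


api = ProxmoxAPI("proxmoxhost", user="root@pam", password="secret", verify_ssl=False)
print(fetch_vm_config(api, "node01", "qemu", 101, config="current"))

With current=1 the API returns the running configuration, with current=0 it includes pending changes, and config=none skips the call entirely, which is exactly the three choices the new option exposes.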
changelogs/fragments/7485-proxmox_vm_info-config.yml (new file, 2 additions)
@@ -0,0 +1,2 @@
+minor_changes:
+  - proxmox_vm_info - add ability to retrieve configuration info (https://github.com/ansible-collections/community.general/pull/7485).
plugins/modules/proxmox_vm_info.py

@@ -41,6 +41,19 @@ options:
       - Restrict results to a specific virtual machine(s) by using their name.
       - If VM(s) with the specified name do not exist in a cluster then the resulting list will be empty.
     type: str
+  config:
+    description:
+      - Whether to retrieve the VM configuration along with VM status.
+      - If set to V(none) (default), no configuration will be returned.
+      - If set to V(current), the current running configuration will be returned.
+      - If set to V(pending), the configuration with pending changes applied will be returned.
+    type: str
+    choices:
+      - none
+      - current
+      - pending
+    default: none
+    version_added: 8.1.0
 extends_documentation_fragment:
   - community.general.proxmox.documentation
   - community.general.attributes
@@ -73,7 +86,7 @@ EXAMPLES = """
     type: qemu
     vmid: 101

-- name: Retrieve information about specific VM by name
+- name: Retrieve information about specific VM by name and get current configuration
   community.general.proxmox_vm_info:
     api_host: proxmoxhost
     api_user: root@pam
@@ -81,6 +94,7 @@ EXAMPLES = """
     node: node01
     type: lxc
     name: lxc05.home.arpa
+    config: current
 """

 RETURN = """
@@ -154,42 +168,46 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
                 msg="Failed to retrieve VMs information from cluster resources: %s" % e
             )

-    def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, name=None, node=None):
-        vms = []
-        for vm in vms_unfiltered:
-            if (
-                type != vm["type"]
-                or (node and vm["node"] != node)
-                or (vmid and int(vm["vmid"]) != vmid)
-                or (name is not None and vm["name"] != name)
-            ):
-                continue
-            vms.append(vm)
-        nodes = frozenset([vm["node"] for vm in vms])
-        for node in nodes:
-            if type == "qemu":
-                vms_from_nodes = self.proxmox_api.nodes(node).qemu().get()
-            else:
-                vms_from_nodes = self.proxmox_api.nodes(node).lxc().get()
-            for vmn in vms_from_nodes:
-                for vm in vms:
-                    if int(vm["vmid"]) == int(vmn["vmid"]):
-                        vm.update(vmn)
-                        vm["vmid"] = int(vm["vmid"])
-                        vm["template"] = proxmox_to_ansible_bool(vm["template"])
-                        break
+    def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None):
+        # Leave in dict only machines that user wants to know about
+        filtered_vms = {
+            vm: info for vm, info in cluster_machines.items() if not (
+                type != info["type"]
+                or (node and info["node"] != node)
+                or (vmid and int(info["vmid"]) != vmid)
+                or (name is not None and info["name"] != name)
+            )
+        }
+        # Get list of unique node names and loop through it to get info about machines.
+        nodes = frozenset([info["node"] for vm, info in filtered_vms.items()])
+        for this_node in nodes:
+            # "type" is mandatory and can have only values of "qemu" or "lxc". Seems that use of reflection is safe.
+            call_vm_getter = getattr(self.proxmox_api.nodes(this_node), type)
+            vms_from_this_node = call_vm_getter().get()
+            for detected_vm in vms_from_this_node:
+                this_vm_id = int(detected_vm["vmid"])
+                desired_vm = filtered_vms.get(this_vm_id, None)
+                if desired_vm:
+                    desired_vm.update(detected_vm)
+                    desired_vm["vmid"] = this_vm_id
+                    desired_vm["template"] = proxmox_to_ansible_bool(desired_vm["template"])
+                    # When user wants to retrieve the VM configuration
+                    if config != "none":
+                        # pending = 0, current = 1
+                        config_type = 0 if config == "pending" else 1
+                        # GET /nodes/{node}/qemu/{vmid}/config current=[0/1]
+                        desired_vm["config"] = call_vm_getter(this_vm_id).config().get(current=config_type)
+        return filtered_vms

-        return vms
-
-    def get_qemu_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
+    def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None):
         try:
-            return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, name, node)
+            return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config)
         except Exception as e:
             self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)

-    def get_lxc_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
+    def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None):
         try:
-            return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, name, node)
+            return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, node, config)
         except Exception as e:
             self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
@@ -203,6 +221,10 @@ def main():
         ),
         vmid=dict(type="int", required=False),
         name=dict(type="str", required=False),
+        config=dict(
+            type="str", choices=["none", "current", "pending"],
+            default="none", required=False
+        ),
     )
     module_args.update(vm_info_args)

@@ -218,6 +240,7 @@ def main():
     type = module.params["type"]
     vmid = module.params["vmid"]
     name = module.params["name"]
+    config = module.params["config"]

     result = dict(changed=False)

@@ -225,21 +248,18 @@ def main():
         module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)

     vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
-    vms = []
+    cluster_machines = {int(machine["vmid"]): machine for machine in vms_cluster_resources}
+    vms = {}

     if type == "lxc":
-        vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
+        vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config)
     elif type == "qemu":
-        vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, name, node)
+        vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config)
     else:
-        vms = proxmox.get_qemu_vms(
-            vms_cluster_resources,
-            vmid,
-            name,
-            node,
-        ) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
+        vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config)
+        vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config))

-    result["proxmox_vms"] = vms
+    result["proxmox_vms"] = [info for vm, info in sorted(vms.items())]
     module.exit_json(**result)

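The "Make use of dict instead of list" bullet in the commit message is why main() now builds a vmid-keyed dict and only converts it to a sorted list at the very end. A small self-contained sketch of that pattern (the sample records below are invented; only their shape mimics the cluster-resources response):

# Illustrative only: keying cluster resources by integer vmid makes merging
# per-node details an O(1) lookup and keeps the final output order deterministic.
cluster_resources = [
    {"vmid": "101", "type": "qemu", "node": "node01", "name": "test1", "template": 0},
    {"vmid": "102", "type": "lxc", "node": "node01", "name": "test2", "template": 0},
]
cluster_machines = {int(machine["vmid"]): machine for machine in cluster_resources}

node_level_details = [{"vmid": 101, "status": "running"}]  # shaped like a per-node listing
for detected_vm in node_level_details:
    desired_vm = cluster_machines.get(int(detected_vm["vmid"]))
    if desired_vm:
        desired_vm.update(detected_vm)  # merge node-level details into the cluster record

proxmox_vms = [info for vmid, info in sorted(cluster_machines.items())]
print(proxmox_vms)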
tests/unit/plugins/modules/test_proxmox_vm_info.py

@@ -414,7 +414,7 @@ EXPECTED_VMS_OUTPUT = [
 ]


-def get_module_args(type="all", node=None, vmid=None, name=None):
+def get_module_args(type="all", node=None, vmid=None, name=None, config="none"):
     return {
         "api_host": "host",
         "api_user": "user",
@@ -423,6 +423,7 @@ def get_module_args(type="all", node=None, vmid=None, name=None):
         "type": type,
         "vmid": vmid,
         "name": name,
+        "config": config,
     }

@@ -460,23 +461,21 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
     def test_get_lxc_vms_information(self):
         with pytest.raises(AnsibleExitJson) as exc_info:
             set_module_args(get_module_args(type="lxc"))
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "lxc"]
             self.module.main()

         result = exc_info.value.args[0]
         assert result["changed"] is False
-        assert result["proxmox_vms"] == [
-            vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "lxc"
-        ]
+        assert result["proxmox_vms"] == expected_output

     def test_get_qemu_vms_information(self):
         with pytest.raises(AnsibleExitJson) as exc_info:
             set_module_args(get_module_args(type="qemu"))
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "qemu"]
             self.module.main()

         result = exc_info.value.args[0]
-        assert result["proxmox_vms"] == [
-            vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "qemu"
-        ]
+        assert result["proxmox_vms"] == expected_output

     def test_get_all_vms_information(self):
         with pytest.raises(AnsibleExitJson) as exc_info:
@@ -566,7 +565,7 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
         assert result["proxmox_vms"] == expected_output
         assert len(result["proxmox_vms"]) == 2

-    def test_get_multiple_vms_with_the_same_name(self):
+    def test_get_vm_with_an_empty_name(self):
         name = ""
         self.connect_mock.return_value.cluster.resources.get.return_value = [
             {"name": name, "vmid": "105"},
@@ -665,13 +664,12 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
         )

     def test_module_fail_when_node_does_not_exist(self):
-        self.connect_mock.return_value.nodes.get.return_value = []
         with pytest.raises(AnsibleFailJson) as exc_info:
-            set_module_args(get_module_args(type="all", node=NODE1))
+            set_module_args(get_module_args(type="all", node="NODE3"))
             self.module.main()

         result = exc_info.value.args[0]
-        assert result["msg"] == "Node pve doesn't exist in PVE cluster"
+        assert result["msg"] == "Node NODE3 doesn't exist in PVE cluster"

     def test_call_to_get_vmid_is_not_used_when_vmid_provided(self):
         with patch(
@@ -685,3 +683,32 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
                 self.module.main()

         assert get_vmid_mock.call_count == 0
+
+    def test_config_returned_when_specified_qemu_vm_with_config(self):
+        config_vm_value = {
+            'scsi0': 'local-lvm:vm-101-disk-0,iothread=1,size=32G',
+            'net0': 'virtio=4E:79:9F:A8:EE:E4,bridge=vmbr0,firewall=1',
+            'scsihw': 'virtio-scsi-single',
+            'cores': 1,
+            'name': 'test1',
+            'ostype': 'l26',
+            'boot': 'order=scsi0;ide2;net0',
+            'memory': 2048,
+            'sockets': 1,
+        }
+        (self.connect_mock.return_value.nodes.return_value.qemu.return_value.
+         config.return_value.get.return_value) = config_vm_value
+
+        with pytest.raises(AnsibleExitJson) as exc_info:
+            vmid = 101
+            set_module_args(get_module_args(
+                type="qemu",
+                vmid=vmid,
+                config="current",
+            ))
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["vmid"] == vmid]
+            expected_output[0]["config"] = config_vm_value
+            self.module.main()
+
+        result = exc_info.value.args[0]
+        assert result["proxmox_vms"] == expected_output
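The long chained return_value assignment in this new test mirrors, attribute by attribute, the call path the module takes through the API client. A minimal standalone illustration with unittest.mock (names and values are arbitrary and not taken from the test suite):

# Illustrative only: each ".return_value" stands for the object produced by calling
# the previous attribute, so the single assignment below answers the whole chain
# proxmox_api.nodes("node01").qemu(101).config().get(current=1).
from unittest.mock import MagicMock

proxmox_api = MagicMock()
proxmox_api.nodes.return_value.qemu.return_value.config.return_value.get.return_value = {"cores": 1}

assert proxmox_api.nodes("node01").qemu(101).config().get(current=1) == {"cores": 1}
print("mock chain resolves to:", proxmox_api.nodes("node01").qemu(101).config().get(current=1))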