Mirror of https://github.com/ansible-collections/community.general.git, synced 2024-09-14 20:13:21 +02:00
[PR #7049/2089769c backport][stable-7] [proxmox_vm_info] Return empty list when requested VM doesn't exist (#7079)
[proxmox_vm_info] Return empty list when requested VM doesn't exist (#7049)
* [proxmox_vm_info] Return empty list when requested VM doesn't exist
* Update documentation
* Add changelog fragment
* Address review comments
* Allow to filter by empty name
* Update plugins/modules/proxmox_vm_info.py
Co-authored-by: Felix Fontein <felix@fontein.de>
---------
Co-authored-by: Felix Fontein <felix@fontein.de>
(cherry picked from commit 2089769ccc)
Co-authored-by: Sergei Antipov <greendayonfire@gmail.com>
parent 7ebb301930, commit 1a801323a8
3 changed files with 185 additions and 42 deletions
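In short, the VM filter now treats vmid and name as optional predicates: anything that does not match is skipped, and an empty list is a valid, successful result. A standalone sketch of that behaviour (the function name and sample data are invented for illustration; the real logic lives in get_vms_from_nodes in the diff below):

# Illustration only -- mirrors the filtering introduced by this PR, with made-up data.
def filter_vms(vms, type, vmid=None, name=None, node=None):
    matched = []
    for vm in vms:
        if (
            type != vm["type"]
            or (node and vm["node"] != node)
            or (vmid and int(vm["vmid"]) != vmid)
            or (name is not None and vm["name"] != name)
        ):
            continue
        matched.append(vm)
    return matched


sample = [
    {"type": "lxc", "node": "pve", "vmid": "102", "name": "test-lxc.home.arpa"},
    {"type": "qemu", "node": "pve", "vmid": "101", "name": "test-qemu"},
]
print(filter_vms(sample, "lxc", name="no-such-vm"))          # [] -- no failure
print(filter_vms(sample, "lxc", name="test-lxc.home.arpa"))  # one matching dict
print(filter_vms(sample, "qemu", vmid=999))                  # [] -- unknown vmid is not an error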
Changelog fragment (new file):

@@ -0,0 +1,2 @@
+minor_changes:
+  - proxmox_vm_info - non-existing provided by name/vmid VM would return empty results instead of failing (https://github.com/ansible-collections/community.general/pull/7049).
plugins/modules/proxmox_vm_info.py:

@@ -34,11 +34,12 @@ options:
   vmid:
     description:
       - Restrict results to a specific virtual machine by using its ID.
+      - If VM with the specified vmid does not exist in a cluster then resulting list will be empty.
     type: int
   name:
     description:
-      - Restrict results to a specific virtual machine by using its name.
-      - If multiple virtual machines have the same name then vmid must be used instead.
+      - Restrict results to a specific virtual machine(s) by using their name.
+      - If VM(s) with the specified name do not exist in a cluster then the resulting list will be empty.
     type: str
 extends_documentation_fragment:
   - community.general.proxmox.documentation
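As a toy illustration of the documented semantics (the data below is invented, not taken from the module): several VMs may share a name, a name that matches nothing simply yields an empty result, and an empty string is itself a valid name to filter on.

# Invented sample data; name-based selection as described in the option docs.
inventory = [
    {"vmid": 102, "name": "test-lxc.home.arpa"},
    {"vmid": 104, "name": "test-lxc.home.arpa"},  # duplicate name on another node
    {"vmid": 105, "name": ""},                    # VM without a name
]

def by_name(vms, name):
    return [vm for vm in vms if vm["name"] == name]

print(len(by_name(inventory, "test-lxc.home.arpa")))  # 2 -- all duplicates are returned
print(by_name(inventory, "does-not-exist"))           # [] -- empty list instead of a failure
print(by_name(inventory, ""))                         # [{'vmid': 105, 'name': ''}]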
@@ -153,13 +154,14 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
                 msg="Failed to retrieve VMs information from cluster resources: %s" % e
             )
 
-    def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, node=None):
+    def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, name=None, node=None):
         vms = []
         for vm in vms_unfiltered:
             if (
                 type != vm["type"]
                 or (node and vm["node"] != node)
                 or (vmid and int(vm["vmid"]) != vmid)
+                or (name is not None and vm["name"] != name)
             ):
                 continue
             vms.append(vm)
@@ -179,15 +181,15 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
 
         return vms
 
-    def get_qemu_vms(self, vms_unfiltered, vmid=None, node=None):
+    def get_qemu_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
         try:
-            return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, node)
+            return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, name, node)
         except Exception as e:
             self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)
 
-    def get_lxc_vms(self, vms_unfiltered, vmid=None, node=None):
+    def get_lxc_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
         try:
-            return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, node)
+            return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, name, node)
         except Exception as e:
             self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
 
@@ -222,30 +224,23 @@ def main():
     if node and proxmox.get_node(node) is None:
         module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)
 
-    if not vmid and name:
-        vmid = int(proxmox.get_vmid(name, ignore_missing=False))
-
     vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
-    vms = None
+    vms = []
 
     if type == "lxc":
-        vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
+        vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
     elif type == "qemu":
-        vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, node)
+        vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, name, node)
     else:
         vms = proxmox.get_qemu_vms(
-            vms_cluster_resources, vmid, node
-        ) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
+            vms_cluster_resources,
+            vmid,
+            name,
+            node,
+        ) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
 
-    if vms or vmid is None:
-        result["proxmox_vms"] = vms
-        module.exit_json(**result)
-    else:
-        if node is None:
-            result["msg"] = "VM with vmid %s doesn't exist in cluster" % (vmid)
-        else:
-            result["msg"] = "VM with vmid %s doesn't exist on node %s" % (vmid, node)
-        module.fail_json(**result)
+    result["proxmox_vms"] = vms
+    module.exit_json(**result)
 
 
 if __name__ == "__main__":
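With the failure branch gone, main() always exits successfully and reports whatever matched; callers distinguish "not found" by checking for an empty list. An illustration of the new result contract (the dict below is a made-up example, not real module output):

# Made-up example of a module result after this change: "proxmox_vms" is always
# present, and a VM that does not exist shows up as an empty list, not a failure.
result = {"proxmox_vms": []}

if not result["proxmox_vms"]:
    print("no VM matched the given vmid/name/node filters")
else:
    for vm in result["proxmox_vms"]:
        print(vm["vmid"], vm.get("name", ""))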
Unit tests for proxmox_vm_info:

@@ -113,6 +113,48 @@ RAW_CLUSTER_OUTPUT = [
         "uptime": 0,
         "vmid": 103,
     },
+    {
+        "cpu": 0,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/104",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "mem": 0,
+        "name": "test-lxc.home.arpa",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "template": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 104,
+    },
+    {
+        "cpu": 0,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/105",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "mem": 0,
+        "name": "",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "template": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 105,
+    },
 ]
 RAW_LXC_OUTPUT = [
     {
@@ -154,6 +196,44 @@ RAW_LXC_OUTPUT = [
         "uptime": 161,
         "vmid": "102",
     },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "test-lxc.home.arpa",
+        "netin": 0,
+        "netout": 0,
+        "status": "stopped",
+        "swap": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": "104",
+    },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "",
+        "netin": 0,
+        "netout": 0,
+        "status": "stopped",
+        "swap": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": "105",
+    },
 ]
 RAW_QEMU_OUTPUT = [
     {
@@ -283,6 +363,54 @@ EXPECTED_VMS_OUTPUT = [
         "uptime": 0,
         "vmid": 103,
     },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/104",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "test-lxc.home.arpa",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "swap": 0,
+        "template": False,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 104,
+    },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/105",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "swap": 0,
+        "template": False,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 105,
+    },
 ]
 
 
@@ -408,9 +536,40 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
         assert len(result["proxmox_vms"]) == 1
 
     def test_get_specific_vm_information_by_using_name(self):
+        name = "test1-lxc.home.arpa"
+        self.connect_mock.return_value.cluster.resources.get.return_value = [
+            {"name": name, "vmid": "103"}
+        ]
+
+        with pytest.raises(AnsibleExitJson) as exc_info:
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
+            set_module_args(get_module_args(type="all", name=name))
+            self.module.main()
+
+        result = exc_info.value.args[0]
+        assert result["proxmox_vms"] == expected_output
+        assert len(result["proxmox_vms"]) == 1
+
+    def test_get_multiple_vms_with_the_same_name(self):
         name = "test-lxc.home.arpa"
         self.connect_mock.return_value.cluster.resources.get.return_value = [
-            {"name": name, "vmid": "102"}
+            {"name": name, "vmid": "102"},
+            {"name": name, "vmid": "104"},
+        ]
+
+        with pytest.raises(AnsibleExitJson) as exc_info:
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
+            set_module_args(get_module_args(type="all", name=name))
+            self.module.main()
+
+        result = exc_info.value.args[0]
+        assert result["proxmox_vms"] == expected_output
+        assert len(result["proxmox_vms"]) == 2
+
+    def test_get_vm_with_an_empty_name(self):
+        name = ""
+        self.connect_mock.return_value.cluster.resources.get.return_value = [
+            {"name": name, "vmid": "105"},
         ]
 
         with pytest.raises(AnsibleExitJson) as exc_info:
@@ -452,11 +611,7 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
 
     def test_get_all_vms_from_specific_node(self):
         with pytest.raises(AnsibleExitJson) as exc_info:
-            expected_output = [
-                vm
-                for vm in EXPECTED_VMS_OUTPUT
-                if vm["node"] == NODE1
-            ]
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["node"] == NODE1]
             set_module_args(get_module_args(node=NODE1))
             self.module.main()
 
@@ -464,23 +619,14 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
         assert result["proxmox_vms"] == expected_output
         assert len(result["proxmox_vms"]) == 2
 
-    def test_module_fail_when_vm_does_not_exist_on_node(self):
-        with pytest.raises(AnsibleFailJson) as exc_info:
-            vmid = 200
-            set_module_args(get_module_args(type="all", vmid=vmid, node=NODE1))
-            self.module.main()
-
-        result = exc_info.value.args[0]
-        assert result["msg"] == "VM with vmid 200 doesn't exist on node pve"
-
-    def test_module_fail_when_vm_does_not_exist_in_cluster(self):
-        with pytest.raises(AnsibleFailJson) as exc_info:
+    def test_module_returns_empty_list_when_vm_does_not_exist(self):
+        with pytest.raises(AnsibleExitJson) as exc_info:
             vmid = 200
             set_module_args(get_module_args(type="all", vmid=vmid))
             self.module.main()
 
         result = exc_info.value.args[0]
-        assert result["msg"] == "VM with vmid 200 doesn't exist in cluster"
+        assert result["proxmox_vms"] == []
 
     def test_module_fail_when_qemu_request_fails(self):
         self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.side_effect = IOError(