Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)
[PR #7049/2089769c backport][stable-7] [proxmox_vm_info] Return empty list when requested VM doesn't exist (#7079)
[proxmox_vm_info] Return empty list when requested VM doesn't exist (#7049)
* [proxmox_vm_info] Return empty list when requested VM doesn't exist
* Update documentation
* Add changelog fragment
* Address review comments
* Allow to filter by empty name
* Update plugins/modules/proxmox_vm_info.py
Co-authored-by: Felix Fontein <felix@fontein.de>
---------
Co-authored-by: Felix Fontein <felix@fontein.de>
(cherry picked from commit 2089769ccc)
Co-authored-by: Sergei Antipov <greendayonfire@gmail.com>
parent 7ebb301930
commit 1a801323a8
3 changed files with 185 additions and 42 deletions
Changelog fragment (new file):

@@ -0,0 +1,2 @@
+minor_changes:
+  - proxmox_vm_info - non-existing provided by name/vmid VM would return empty results instead of failing (https://github.com/ansible-collections/community.general/pull/7049).
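As an illustration of the changed behavior (not part of the commit; the host, credentials, and variable names below are placeholders): querying a vmid that is not present in the cluster now exits successfully with an empty list instead of failing the task.

- name: Query a VM that does not exist
  community.general.proxmox_vm_info:
    api_host: pve.example.com            # placeholder
    api_user: root@pam                   # placeholder
    api_password: "{{ pve_password }}"   # placeholder
    vmid: 200
  register: vm_info

- name: Show the (empty) result
  ansible.builtin.debug:
    var: vm_info.proxmox_vms             # [] rather than a module failure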
plugins/modules/proxmox_vm_info.py:

@@ -34,11 +34,12 @@ options:
   vmid:
     description:
       - Restrict results to a specific virtual machine by using its ID.
+      - If VM with the specified vmid does not exist in a cluster then resulting list will be empty.
     type: int
   name:
     description:
-      - Restrict results to a specific virtual machine by using its name.
-      - If multiple virtual machines have the same name then vmid must be used instead.
+      - Restrict results to a specific virtual machine(s) by using their name.
+      - If VM(s) with the specified name do not exist in a cluster then the resulting list will be empty.
     type: str
 extends_documentation_fragment:
   - community.general.proxmox.documentation
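A name filter can now match more than one guest, and a query by name that matches nothing simply returns an empty list. A sketch of typical usage (not from the commit; connection values are placeholders):

- name: Get all guests named test-lxc.home.arpa (several guests may share a name)
  community.general.proxmox_vm_info:
    api_host: pve.example.com            # placeholder
    api_user: root@pam                   # placeholder
    api_password: "{{ pve_password }}"   # placeholder
    name: test-lxc.home.arpa
  register: by_name

- name: Report how many guests matched
  ansible.builtin.debug:
    msg: "{{ by_name.proxmox_vms | length }} guest(s) matched"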
@@ -153,13 +154,14 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
                 msg="Failed to retrieve VMs information from cluster resources: %s" % e
             )
 
-    def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, node=None):
+    def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, name=None, node=None):
         vms = []
         for vm in vms_unfiltered:
             if (
                 type != vm["type"]
                 or (node and vm["node"] != node)
                 or (vmid and int(vm["vmid"]) != vmid)
+                or (name is not None and vm["name"] != name)
             ):
                 continue
             vms.append(vm)
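The added predicate checks "name is not None" rather than the value's truthiness, so an empty string is accepted as a deliberate filter and matches guests whose name is unset. A sketch of that case (connection values are placeholders):

- name: List guests that have no name set
  community.general.proxmox_vm_info:
    api_host: pve.example.com            # placeholder
    api_user: root@pam                   # placeholder
    api_password: "{{ pve_password }}"   # placeholder
    name: ""
  register: unnamed_guests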
@@ -179,15 +181,15 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
 
         return vms
 
-    def get_qemu_vms(self, vms_unfiltered, vmid=None, node=None):
+    def get_qemu_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
         try:
-            return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, node)
+            return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, name, node)
         except Exception as e:
             self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)
 
-    def get_lxc_vms(self, vms_unfiltered, vmid=None, node=None):
+    def get_lxc_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
         try:
-            return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, node)
+            return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, name, node)
         except Exception as e:
             self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
 
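For context (not part of the commit; connection values are placeholders), these two getters back the module's type parameter: "qemu" and "lxc" each use one getter, while "all" concatenates both results, as the main() change below shows.

- name: List only LXC guests on node pve
  community.general.proxmox_vm_info:
    api_host: pve.example.com            # placeholder
    api_user: root@pam                   # placeholder
    api_password: "{{ pve_password }}"   # placeholder
    type: lxc
    node: pve
  register: lxc_guests

- name: List QEMU and LXC guests together
  community.general.proxmox_vm_info:
    api_host: pve.example.com            # placeholder
    api_user: root@pam                   # placeholder
    api_password: "{{ pve_password }}"   # placeholder
    type: all
  register: all_guests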
@@ -222,30 +224,23 @@ def main():
     if node and proxmox.get_node(node) is None:
         module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)
 
-    if not vmid and name:
-        vmid = int(proxmox.get_vmid(name, ignore_missing=False))
-
     vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
-    vms = None
+    vms = []
 
     if type == "lxc":
-        vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
+        vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
     elif type == "qemu":
-        vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, node)
+        vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, name, node)
     else:
         vms = proxmox.get_qemu_vms(
-            vms_cluster_resources, vmid, node
-        ) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
+            vms_cluster_resources,
+            vmid,
+            name,
+            node,
+        ) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
 
-    if vms or vmid is None:
-        result["proxmox_vms"] = vms
-        module.exit_json(**result)
-    else:
-        if node is None:
-            result["msg"] = "VM with vmid %s doesn't exist in cluster" % (vmid)
-        else:
-            result["msg"] = "VM with vmid %s doesn't exist on node %s" % (vmid, node)
-        module.fail_json(**result)
+    result["proxmox_vms"] = vms
+    module.exit_json(**result)
 
 
 if __name__ == "__main__":
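Since a lookup that matches nothing no longer fails the module, a playbook that still wants a hard failure in that case has to enforce it itself; one way to do so, sketched with placeholder connection values:

- name: Look up a VM and fail the play if it is absent
  community.general.proxmox_vm_info:
    api_host: pve.example.com            # placeholder
    api_user: root@pam                   # placeholder
    api_password: "{{ pve_password }}"   # placeholder
    vmid: 200
  register: vm_info
  failed_when: vm_info.proxmox_vms | length == 0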
Unit tests for proxmox_vm_info:

@@ -113,6 +113,48 @@ RAW_CLUSTER_OUTPUT = [
         "uptime": 0,
         "vmid": 103,
     },
+    {
+        "cpu": 0,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/104",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "mem": 0,
+        "name": "test-lxc.home.arpa",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "template": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 104,
+    },
+    {
+        "cpu": 0,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/105",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "mem": 0,
+        "name": "",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "template": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 105,
+    },
 ]
 RAW_LXC_OUTPUT = [
     {
@@ -154,6 +196,44 @@ RAW_LXC_OUTPUT = [
         "uptime": 161,
         "vmid": "102",
     },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "test-lxc.home.arpa",
+        "netin": 0,
+        "netout": 0,
+        "status": "stopped",
+        "swap": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": "104",
+    },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "",
+        "netin": 0,
+        "netout": 0,
+        "status": "stopped",
+        "swap": 0,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": "105",
+    },
 ]
 RAW_QEMU_OUTPUT = [
     {
@@ -283,6 +363,54 @@ EXPECTED_VMS_OUTPUT = [
         "uptime": 0,
         "vmid": 103,
     },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/104",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "test-lxc.home.arpa",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "swap": 0,
+        "template": False,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 104,
+    },
+    {
+        "cpu": 0,
+        "cpus": 2,
+        "disk": 0,
+        "diskread": 0,
+        "diskwrite": 0,
+        "id": "lxc/105",
+        "maxcpu": 2,
+        "maxdisk": 10737418240,
+        "maxmem": 536870912,
+        "maxswap": 536870912,
+        "mem": 0,
+        "name": "",
+        "netin": 0,
+        "netout": 0,
+        "node": NODE2,
+        "pool": "pool1",
+        "status": "stopped",
+        "swap": 0,
+        "template": False,
+        "type": "lxc",
+        "uptime": 0,
+        "vmid": 105,
+    },
 ]
 
 
@@ -408,9 +536,40 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
         assert len(result["proxmox_vms"]) == 1
 
     def test_get_specific_vm_information_by_using_name(self):
+        name = "test1-lxc.home.arpa"
+        self.connect_mock.return_value.cluster.resources.get.return_value = [
+            {"name": name, "vmid": "103"}
+        ]
+
+        with pytest.raises(AnsibleExitJson) as exc_info:
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
+            set_module_args(get_module_args(type="all", name=name))
+            self.module.main()
+
+        result = exc_info.value.args[0]
+        assert result["proxmox_vms"] == expected_output
+        assert len(result["proxmox_vms"]) == 1
+
+    def test_get_multiple_vms_with_the_same_name(self):
         name = "test-lxc.home.arpa"
         self.connect_mock.return_value.cluster.resources.get.return_value = [
-            {"name": name, "vmid": "102"}
+            {"name": name, "vmid": "102"},
+            {"name": name, "vmid": "104"},
+        ]
+
+        with pytest.raises(AnsibleExitJson) as exc_info:
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
+            set_module_args(get_module_args(type="all", name=name))
+            self.module.main()
+
+        result = exc_info.value.args[0]
+        assert result["proxmox_vms"] == expected_output
+        assert len(result["proxmox_vms"]) == 2
+
+    def test_get_multiple_vms_with_the_same_name(self):
+        name = ""
+        self.connect_mock.return_value.cluster.resources.get.return_value = [
+            {"name": name, "vmid": "105"},
         ]
 
         with pytest.raises(AnsibleExitJson) as exc_info:
@@ -452,11 +611,7 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
 
     def test_get_all_vms_from_specific_node(self):
         with pytest.raises(AnsibleExitJson) as exc_info:
-            expected_output = [
-                vm
-                for vm in EXPECTED_VMS_OUTPUT
-                if vm["node"] == NODE1
-            ]
+            expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["node"] == NODE1]
             set_module_args(get_module_args(node=NODE1))
             self.module.main()
 
@@ -464,23 +619,14 @@
         assert result["proxmox_vms"] == expected_output
         assert len(result["proxmox_vms"]) == 2
 
-    def test_module_fail_when_vm_does_not_exist_on_node(self):
-        with pytest.raises(AnsibleFailJson) as exc_info:
-            vmid = 200
-            set_module_args(get_module_args(type="all", vmid=vmid, node=NODE1))
-            self.module.main()
-
-        result = exc_info.value.args[0]
-        assert result["msg"] == "VM with vmid 200 doesn't exist on node pve"
-
-    def test_module_fail_when_vm_does_not_exist_in_cluster(self):
-        with pytest.raises(AnsibleFailJson) as exc_info:
+    def test_module_returns_empty_list_when_vm_does_not_exist(self):
+        with pytest.raises(AnsibleExitJson) as exc_info:
             vmid = 200
             set_module_args(get_module_args(type="all", vmid=vmid))
             self.module.main()
 
         result = exc_info.value.args[0]
-        assert result["msg"] == "VM with vmid 200 doesn't exist in cluster"
+        assert result["proxmox_vms"] == []
 
     def test_module_fail_when_qemu_request_fails(self):
         self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.side_effect = IOError(