1
0
Fork 0
mirror of https://github.com/ansible-collections/community.general.git synced 2024-09-14 20:13:21 +02:00

[stable-7] Revert new features to be able to do 7.2.1 release (#7042)

* Revert "[PR #7020/b46d5d81 backport][stable-7] redfish_utils: Add support for "nextLink" property tag pagination (#7026)"

This reverts commit 1dad95370e.

* Revert "[PR #6914/17b4219b backport][stable-7] proxmox_kvm: enable 'force' restart of vm (as documented) (#6997)"

This reverts commit 7d68af57af.

* Revert "[PR #6976/d7c1a814 backport][stable-7] [proxmox_vm_info] Re-use cluster resources API to use module without requiring node param (#6993)"

This reverts commit fb3768aada.
This commit is contained in:
Felix Fontein 2023-07-31 15:45:08 +02:00 committed by GitHub
parent eda3d160fa
commit 7cf834fb3c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 146 additions and 423 deletions

View file

@@ -1,2 +0,0 @@
minor_changes:
- proxmox_kvm - enabled force restart of VM, bringing the ``force`` parameter functionality in line with what is described in the docs (https://github.com/ansible-collections/community.general/pull/6914).

View file

@@ -1,2 +0,0 @@
minor_changes:
- proxmox_vm_info - ``node`` parameter is no longer required. Information can be obtained for the whole cluster (https://github.com/ansible-collections/community.general/pull/6976).

View file

@@ -1,2 +0,0 @@
minor_changes:
- redfish_utils module utils - add support for following ``@odata.nextLink`` pagination in ``software_inventory`` responses feature (https://github.com/ansible-collections/community.general/pull/7020).

View file

@@ -1499,37 +1499,29 @@ class RedfishUtils(object):
def _software_inventory(self, uri):
result = {}
result['entries'] = []
response = self.get_request(self.root_uri + uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
while uri:
response = self.get_request(self.root_uri + uri)
result['entries'] = []
for member in data[u'Members']:
uri = self.root_uri + member[u'@odata.id']
# Get details for each software or firmware member
response = self.get_request(uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
if data.get('Members@odata.nextLink'):
uri = data.get('Members@odata.nextLink')
else:
uri = None
for member in data[u'Members']:
fw_uri = self.root_uri + member[u'@odata.id']
# Get details for each software or firmware member
response = self.get_request(fw_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
software = {}
# Get these standard properties if present
for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
'ReleaseDate']:
if key in data:
software[key] = data.get(key)
result['entries'].append(software)
software = {}
# Get these standard properties if present
for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
'ReleaseDate']:
if key in data:
software[key] = data.get(key)
result['entries'].append(software)
return result
def get_firmware_inventory(self):

View file

@@ -1111,11 +1111,11 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
return False
return True
def restart_vm(self, vm, force, **status):
def restart_vm(self, vm, **status):
vmid = vm['vmid']
try:
proxmox_node = self.proxmox_api.nodes(vm['node'])
taskid = proxmox_node.qemu(vmid).status.reset.post() if force else proxmox_node.qemu(vmid).status.reboot.post()
taskid = proxmox_node.qemu(vmid).status.reboot.post()
if not self.wait_for_task(vm['node'], taskid):
self.module.fail_json(msg='Reached timeout while waiting for rebooting VM. Last line in task before timeout: %s' %
proxmox_node.tasks(taskid).log.get()[:1])
@@ -1493,7 +1493,7 @@ def main():
if vm['status'] == 'stopped':
module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
if proxmox.restart_vm(vm, force=module.params['force']):
if proxmox.restart_vm(vm):
module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
elif state == 'absent':

View file

@@ -20,7 +20,8 @@ author: 'Sergei Antipov (@UnderGreen) <greendayonfire at gmail dot com>'
options:
node:
description:
- Restrict results to a specific Proxmox VE node.
- Node where to get virtual machines info.
required: true
type: str
type:
description:
@@ -96,18 +97,14 @@ proxmox_vms:
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "qemu/100",
"maxcpu": 1,
"maxdisk": 34359738368,
"maxmem": 4294967296,
"mem": 35158379,
"name": "pxe.home.arpa",
"netin": 99715803,
"netout": 14237835,
"node": "pve",
"pid": 1947197,
"status": "running",
"template": False,
"type": "qemu",
"uptime": 135530,
"vmid": 100
@@ -118,17 +115,13 @@ proxmox_vms:
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "qemu/101",
"maxcpu": 1,
"maxdisk": 0,
"maxmem": 536870912,
"mem": 0,
"name": "test1",
"netin": 0,
"netout": 0,
"node": "pve",
"status": "stopped",
"template": False,
"type": "qemu",
"uptime": 0,
"vmid": 101
@@ -140,54 +133,30 @@ from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.proxmox import (
proxmox_auth_argument_spec,
ProxmoxAnsible,
proxmox_to_ansible_bool,
)
class ProxmoxVmInfoAnsible(ProxmoxAnsible):
def get_vms_from_cluster_resources(self):
def get_qemu_vms(self, node, vmid=None):
try:
return self.proxmox_api.cluster().resources().get(type="vm")
except Exception as e:
self.module.fail_json(
msg="Failed to retrieve VMs information from cluster resources: %s" % e
)
def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, node=None):
vms = []
for vm in vms_unfiltered:
if (
type != vm["type"]
or (node and vm["node"] != node)
or (vmid and int(vm["vmid"]) != vmid)
):
continue
vms.append(vm)
nodes = frozenset([vm["node"] for vm in vms])
for node in nodes:
if type == "qemu":
vms_from_nodes = self.proxmox_api.nodes(node).qemu().get()
else:
vms_from_nodes = self.proxmox_api.nodes(node).lxc().get()
for vmn in vms_from_nodes:
for vm in vms:
if int(vm["vmid"]) == int(vmn["vmid"]):
vm.update(vmn)
vm["vmid"] = int(vm["vmid"])
vm["template"] = proxmox_to_ansible_bool(vm["template"])
break
return vms
def get_qemu_vms(self, vms_unfiltered, vmid=None, node=None):
try:
return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, node)
vms = self.proxmox_api.nodes(node).qemu().get()
for vm in vms:
vm["vmid"] = int(vm["vmid"])
vm["type"] = "qemu"
if vmid is None:
return vms
return [vm for vm in vms if vm["vmid"] == vmid]
except Exception as e:
self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)
def get_lxc_vms(self, vms_unfiltered, vmid=None, node=None):
def get_lxc_vms(self, node, vmid=None):
try:
return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, node)
vms = self.proxmox_api.nodes(node).lxc().get()
for vm in vms:
vm["vmid"] = int(vm["vmid"])
if vmid is None:
return vms
return [vm for vm in vms if vm["vmid"] == vmid]
except Exception as e:
self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
@@ -195,7 +164,7 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
def main():
module_args = proxmox_auth_argument_spec()
vm_info_args = dict(
node=dict(type="str", required=False),
node=dict(type="str", required=True),
type=dict(
type="str", choices=["lxc", "qemu", "all"], default="all", required=False
),
@@ -219,32 +188,27 @@ def main():
result = dict(changed=False)
if node and proxmox.get_node(node) is None:
if proxmox.get_node(node) is None:
module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)
if not vmid and name:
vmid = int(proxmox.get_vmid(name, ignore_missing=False))
vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
vms = None
if type == "lxc":
vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
vms = proxmox.get_lxc_vms(node, vmid=vmid)
elif type == "qemu":
vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, node)
vms = proxmox.get_qemu_vms(node, vmid=vmid)
else:
vms = proxmox.get_qemu_vms(
vms_cluster_resources, vmid, node
) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
vms = proxmox.get_qemu_vms(node, vmid=vmid) + proxmox.get_lxc_vms(
node, vmid=vmid
)
if vms or vmid is None:
result["proxmox_vms"] = vms
module.exit_json(**result)
else:
if node is None:
result["msg"] = "VM with vmid %s doesn't exist in cluster" % (vmid)
else:
result["msg"] = "VM with vmid %s doesn't exist on node %s" % (vmid, node)
result["msg"] = "VM with vmid %s doesn't exist on node %s" % (vmid, node)
module.fail_json(**result)

View file

@@ -28,276 +28,107 @@ from ansible_collections.community.general.tests.unit.plugins.modules.utils impo
)
import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
NODE1 = "pve"
NODE2 = "pve2"
RAW_CLUSTER_OUTPUT = [
NODE = "pve"
LXC_VMS = [
{
"cpu": 0.174069059487628,
"disk": 0,
"diskread": 6656,
"diskwrite": 0,
"id": "qemu/100",
"maxcpu": 1,
"maxdisk": 34359738368,
"maxmem": 4294967296,
"mem": 35304543,
"name": "pxe.home.arpa",
"netin": 416956,
"netout": 17330,
"node": NODE1,
"status": "running",
"template": 0,
"type": "qemu",
"uptime": 669,
"vmid": 100,
},
{
"cpu": 0,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "qemu/101",
"maxcpu": 1,
"maxdisk": 0,
"maxmem": 536870912,
"mem": 0,
"name": "test1",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"template": 0,
"type": "qemu",
"uptime": 0,
"vmid": 101,
},
{
"cpu": 0,
"disk": 352190464,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/102",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"mem": 28192768,
"name": "test-lxc.home.arpa",
"netin": 102757,
"netout": 446,
"node": NODE1,
"status": "running",
"template": 0,
"type": "lxc",
"uptime": 161,
"vmid": 102,
},
{
"cpu": 0,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/103",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"mem": 0,
"name": "test1-lxc.home.arpa",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"template": 0,
"type": "lxc",
"uptime": 0,
"vmid": 103,
},
]
RAW_LXC_OUTPUT = [
{
"cpu": 0,
"cpus": 2,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"maxdisk": 10737418240,
"maxmem": 536870912,
"uptime": 47,
"maxswap": 536870912,
"mem": 0,
"name": "test1-lxc.home.arpa",
"netin": 0,
"netout": 0,
"status": "stopped",
"swap": 0,
"type": "lxc",
"uptime": 0,
"vmid": "103",
},
{
"cpu": 0,
"cpus": 2,
"disk": 352190464,
"diskread": 0,
"diskwrite": 0,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 28192768,
"name": "test-lxc.home.arpa",
"netin": 102757,
"netout": 446,
"pid": 4076752,
"status": "running",
"swap": 0,
"type": "lxc",
"uptime": 161,
"vmid": "102",
},
]
RAW_QEMU_OUTPUT = [
{
"cpu": 0,
"cpus": 1,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"maxdisk": 0,
"maxmem": 536870912,
"mem": 0,
"name": "test1",
"netin": 0,
"netout": 0,
"status": "stopped",
"uptime": 0,
"vmid": 101,
},
{
"cpu": 0.174069059487628,
"cpus": 1,
"disk": 0,
"diskread": 6656,
"diskwrite": 0,
"maxdisk": 34359738368,
"maxmem": 4294967296,
"mem": 35304543,
"name": "pxe.home.arpa",
"netin": 416956,
"netout": 17330,
"pid": 4076688,
"status": "running",
"uptime": 669,
"vmid": 100,
},
]
EXPECTED_VMS_OUTPUT = [
{
"cpu": 0.174069059487628,
"cpus": 1,
"disk": 0,
"diskread": 6656,
"diskwrite": 0,
"id": "qemu/100",
"maxcpu": 1,
"maxdisk": 34359738368,
"maxmem": 4294967296,
"mem": 35304543,
"name": "pxe.home.arpa",
"netin": 416956,
"netout": 17330,
"node": NODE1,
"pid": 4076688,
"status": "running",
"template": False,
"type": "qemu",
"uptime": 669,
"vmid": 100,
},
{
"cpu": 0,
"cpus": 1,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "qemu/101",
"maxcpu": 1,
"maxdisk": 0,
"maxmem": 536870912,
"mem": 0,
"name": "test1",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"template": False,
"type": "qemu",
"uptime": 0,
"vmid": 101,
},
{
"cpu": 0,
"type": "lxc",
"swap": 0,
"cpus": 2,
"disk": 352190464,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/102",
"maxcpu": 2,
"mem": 29134848,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 28192768,
"name": "test-lxc.home.arpa",
"netin": 102757,
"diskwrite": 0,
"netin": 35729,
"netout": 446,
"node": NODE1,
"pid": 4076752,
"status": "running",
"swap": 0,
"template": False,
"type": "lxc",
"uptime": 161,
"vmid": 102,
"pid": 1412780,
"maxmem": 536870912,
"disk": 307625984,
"cpu": 0,
},
{
"cpu": 0,
"cpus": 2,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/103",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 0,
"name": "test1-lxc.home.arpa",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"cpu": 0,
"maxmem": 536870912,
"disk": 0,
"name": "test1-lxc.home.arpa",
"diskread": 0,
"status": "stopped",
"swap": 0,
"template": False,
"vmid": "103",
"type": "lxc",
"swap": 0,
"uptime": 0,
"vmid": 103,
"maxswap": 536870912,
"diskwrite": 0,
"cpus": 2,
"mem": 0,
"maxdisk": 10737418240,
},
]
QEMU_VMS = [
{
"vmid": 101,
"diskread": 0,
"status": "stopped",
"name": "test1",
"uptime": 0,
"diskwrite": 0,
"cpus": 1,
"mem": 0,
"maxdisk": 0,
"netout": 0,
"netin": 0,
"cpu": 0,
"maxmem": 536870912,
"disk": 0,
},
{
"netout": 4113,
"netin": 22738,
"pid": 1947197,
"maxmem": 4294967296,
"disk": 0,
"cpu": 0.0795350949559682,
"uptime": 41,
"vmid": 100,
"status": "running",
"diskread": 0,
"name": "pxe.home.arpa",
"cpus": 1,
"mem": 35315629,
"maxdisk": 34359738368,
"diskwrite": 0,
},
]
def get_module_args(type="all", node=None, vmid=None, name=None):
def get_module_args(type="all", vmid=None, name=None):
return {
"api_host": "host",
"api_user": "user",
"api_password": "password",
"node": node,
"node": NODE,
"type": type,
"vmid": vmid,
"name": name,
}
def normalized_expected_vms_output(vms):
result = [vm.copy() for vm in vms]
for vm in result:
if "type" not in vm:
# response for QEMU VMs doesn't contain type field, adding it
vm["type"] = "qemu"
vm["vmid"] = int(vm["vmid"])
return result
class TestProxmoxVmInfoModule(ModuleTestCase):
def setUp(self):
super(TestProxmoxVmInfoModule, self).setUp()
@@ -307,15 +138,12 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
"ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect",
).start()
self.connect_mock.return_value.nodes.return_value.lxc.return_value.get.return_value = (
RAW_LXC_OUTPUT
LXC_VMS
)
self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.return_value = (
RAW_QEMU_OUTPUT
QEMU_VMS
)
self.connect_mock.return_value.cluster.return_value.resources.return_value.get.return_value = (
RAW_CLUSTER_OUTPUT
)
self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE1}]
self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE}]
def tearDown(self):
self.connect_mock.stop()
@@ -327,7 +155,7 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
self.module.main()
result = exc_info.value.args[0]
assert result["msg"] == "missing required arguments: api_host, api_user"
assert result["msg"] == "missing required arguments: api_host, api_user, node"
def test_get_lxc_vms_information(self):
with pytest.raises(AnsibleExitJson) as exc_info:
@@ -336,34 +164,36 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
result = exc_info.value.args[0]
assert result["changed"] is False
assert result["proxmox_vms"] == [
vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "lxc"
]
assert result["proxmox_vms"] == LXC_VMS
def test_get_qemu_vms_information(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = normalized_expected_vms_output(QEMU_VMS)
set_module_args(get_module_args(type="qemu"))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == [
vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "qemu"
]
assert result["proxmox_vms"] == expected_output
def test_get_all_vms_information(self):
with pytest.raises(AnsibleExitJson) as exc_info:
qemu_output = normalized_expected_vms_output(QEMU_VMS)
expected_output = qemu_output + LXC_VMS
set_module_args(get_module_args())
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == EXPECTED_VMS_OUTPUT
assert result["proxmox_vms"] == expected_output
def test_vmid_is_converted_to_int(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = normalized_expected_vms_output(LXC_VMS)
set_module_args(get_module_args(type="lxc"))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert isinstance(result["proxmox_vms"][0]["vmid"], int)
def test_get_specific_lxc_vm_information(self):
@@ -371,8 +201,8 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
vmid = 102
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["vmid"] == vmid and vm["type"] == "lxc"
for vm in normalized_expected_vms_output(LXC_VMS)
if vm["vmid"] == vmid
]
set_module_args(get_module_args(type="lxc", vmid=vmid))
self.module.main()
@@ -386,8 +216,8 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
vmid = 100
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["vmid"] == vmid and vm["type"] == "qemu"
for vm in normalized_expected_vms_output(QEMU_VMS)
if vm["vmid"] == vmid
]
set_module_args(get_module_args(type="qemu", vmid=vmid))
self.module.main()
@@ -399,7 +229,11 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
def test_get_specific_vm_information(self):
with pytest.raises(AnsibleExitJson) as exc_info:
vmid = 100
expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["vmid"] == vmid]
expected_output = [
vm
for vm in normalized_expected_vms_output(QEMU_VMS + LXC_VMS)
if vm["vmid"] == vmid
]
set_module_args(get_module_args(type="all", vmid=vmid))
self.module.main()
@@ -414,7 +248,11 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
]
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
expected_output = [
vm
for vm in normalized_expected_vms_output(QEMU_VMS + LXC_VMS)
if vm["name"] == name
]
set_module_args(get_module_args(type="all", name=name))
self.module.main()
@@ -422,65 +260,14 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 1
def test_get_all_lxc_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["node"] == NODE1 and vm["type"] == "lxc"
]
set_module_args(get_module_args(type="lxc", node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 1
def test_get_all_qemu_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["node"] == NODE1 and vm["type"] == "qemu"
]
set_module_args(get_module_args(type="qemu", node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 1
def test_get_all_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["node"] == NODE1
]
set_module_args(get_module_args(node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 2
def test_module_fail_when_vm_does_not_exist_on_node(self):
with pytest.raises(AnsibleFailJson) as exc_info:
vmid = 200
set_module_args(get_module_args(type="all", vmid=vmid, node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["msg"] == "VM with vmid 200 doesn't exist on node pve"
def test_module_fail_when_vm_does_not_exist_in_cluster(self):
with pytest.raises(AnsibleFailJson) as exc_info:
vmid = 200
set_module_args(get_module_args(type="all", vmid=vmid))
self.module.main()
result = exc_info.value.args[0]
assert result["msg"] == "VM with vmid 200 doesn't exist in cluster"
assert result["msg"] == "VM with vmid 200 doesn't exist on node pve"
def test_module_fail_when_qemu_request_fails(self):
self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.side_effect = IOError(
@@ -504,24 +291,10 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
result = exc_info.value.args[0]
assert "Failed to retrieve LXC VMs information:" in result["msg"]
def test_module_fail_when_cluster_resources_request_fails(self):
self.connect_mock.return_value.cluster.return_value.resources.return_value.get.side_effect = IOError(
"Some mocked connection error."
)
with pytest.raises(AnsibleFailJson) as exc_info:
set_module_args(get_module_args())
self.module.main()
result = exc_info.value.args[0]
assert (
"Failed to retrieve VMs information from cluster resources:"
in result["msg"]
)
def test_module_fail_when_node_does_not_exist(self):
self.connect_mock.return_value.nodes.get.return_value = []
with pytest.raises(AnsibleFailJson) as exc_info:
set_module_args(get_module_args(type="all", node=NODE1))
set_module_args(get_module_args(type="all"))
self.module.main()
result = exc_info.value.args[0]