1
0
Fork 0
mirror of https://github.com/ansible-collections/community.general.git synced 2024-09-14 20:13:21 +02:00

Revert "[stable-7] Revert new features to be able to do 7.2.1 release (#7042)"

This reverts commit 7cf834fb3c.
This commit is contained in:
Felix Fontein 2023-07-31 16:29:37 +02:00
parent 61314898ca
commit e6f65634fe
7 changed files with 434 additions and 157 deletions

View file

@@ -0,0 +1,2 @@
minor_changes:
- proxmox_kvm - enabled force restart of VM, bringing the ``force`` parameter functionality in line with what is described in the docs (https://github.com/ansible-collections/community.general/pull/6914).

View file

@@ -0,0 +1,2 @@
minor_changes:
- proxmox_vm_info - ``node`` parameter is no longer required. Information can be obtained for the whole cluster (https://github.com/ansible-collections/community.general/pull/6976).

View file

@@ -0,0 +1,2 @@
minor_changes:
- redfish_utils module utils - add support for following ``@odata.nextLink`` pagination in ``software_inventory`` responses feature (https://github.com/ansible-collections/community.general/pull/7020).

View file

@@ -1499,29 +1499,37 @@ class RedfishUtils(object):
def _software_inventory(self, uri): def _software_inventory(self, uri):
result = {} result = {}
response = self.get_request(self.root_uri + uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
result['entries'] = [] result['entries'] = []
for member in data[u'Members']:
uri = self.root_uri + member[u'@odata.id'] while uri:
# Get details for each software or firmware member response = self.get_request(self.root_uri + uri)
response = self.get_request(uri)
if response['ret'] is False: if response['ret'] is False:
return response return response
result['ret'] = True result['ret'] = True
data = response['data'] data = response['data']
software = {} if data.get('Members@odata.nextLink'):
# Get these standard properties if present uri = data.get('Members@odata.nextLink')
for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', else:
'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', uri = None
'ReleaseDate']:
if key in data: for member in data[u'Members']:
software[key] = data.get(key) fw_uri = self.root_uri + member[u'@odata.id']
result['entries'].append(software) # Get details for each software or firmware member
response = self.get_request(fw_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
software = {}
# Get these standard properties if present
for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
'ReleaseDate']:
if key in data:
software[key] = data.get(key)
result['entries'].append(software)
return result return result
def get_firmware_inventory(self): def get_firmware_inventory(self):

View file

@@ -1111,11 +1111,11 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
return False return False
return True return True
def restart_vm(self, vm, **status): def restart_vm(self, vm, force, **status):
vmid = vm['vmid'] vmid = vm['vmid']
try: try:
proxmox_node = self.proxmox_api.nodes(vm['node']) proxmox_node = self.proxmox_api.nodes(vm['node'])
taskid = proxmox_node.qemu(vmid).status.reboot.post() taskid = proxmox_node.qemu(vmid).status.reset.post() if force else proxmox_node.qemu(vmid).status.reboot.post()
if not self.wait_for_task(vm['node'], taskid): if not self.wait_for_task(vm['node'], taskid):
self.module.fail_json(msg='Reached timeout while waiting for rebooting VM. Last line in task before timeout: %s' % self.module.fail_json(msg='Reached timeout while waiting for rebooting VM. Last line in task before timeout: %s' %
proxmox_node.tasks(taskid).log.get()[:1]) proxmox_node.tasks(taskid).log.get()[:1])
@@ -1493,7 +1493,7 @@ def main():
if vm['status'] == 'stopped': if vm['status'] == 'stopped':
module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
if proxmox.restart_vm(vm): if proxmox.restart_vm(vm, force=module.params['force']):
module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status) module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
elif state == 'absent': elif state == 'absent':

View file

@@ -20,8 +20,7 @@ author: 'Sergei Antipov (@UnderGreen) <greendayonfire at gmail dot com>'
options: options:
node: node:
description: description:
- Node where to get virtual machines info. - Restrict results to a specific Proxmox VE node.
required: true
type: str type: str
type: type:
description: description:
@@ -97,14 +96,18 @@ proxmox_vms:
"disk": 0, "disk": 0,
"diskread": 0, "diskread": 0,
"diskwrite": 0, "diskwrite": 0,
"id": "qemu/100",
"maxcpu": 1,
"maxdisk": 34359738368, "maxdisk": 34359738368,
"maxmem": 4294967296, "maxmem": 4294967296,
"mem": 35158379, "mem": 35158379,
"name": "pxe.home.arpa", "name": "pxe.home.arpa",
"netin": 99715803, "netin": 99715803,
"netout": 14237835, "netout": 14237835,
"node": "pve",
"pid": 1947197, "pid": 1947197,
"status": "running", "status": "running",
"template": False,
"type": "qemu", "type": "qemu",
"uptime": 135530, "uptime": 135530,
"vmid": 100 "vmid": 100
@@ -115,13 +118,17 @@ proxmox_vms:
"disk": 0, "disk": 0,
"diskread": 0, "diskread": 0,
"diskwrite": 0, "diskwrite": 0,
"id": "qemu/101",
"maxcpu": 1,
"maxdisk": 0, "maxdisk": 0,
"maxmem": 536870912, "maxmem": 536870912,
"mem": 0, "mem": 0,
"name": "test1", "name": "test1",
"netin": 0, "netin": 0,
"netout": 0, "netout": 0,
"node": "pve",
"status": "stopped", "status": "stopped",
"template": False,
"type": "qemu", "type": "qemu",
"uptime": 0, "uptime": 0,
"vmid": 101 "vmid": 101
@@ -133,30 +140,54 @@ from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.proxmox import ( from ansible_collections.community.general.plugins.module_utils.proxmox import (
proxmox_auth_argument_spec, proxmox_auth_argument_spec,
ProxmoxAnsible, ProxmoxAnsible,
proxmox_to_ansible_bool,
) )
class ProxmoxVmInfoAnsible(ProxmoxAnsible): class ProxmoxVmInfoAnsible(ProxmoxAnsible):
def get_qemu_vms(self, node, vmid=None): def get_vms_from_cluster_resources(self):
try: try:
vms = self.proxmox_api.nodes(node).qemu().get() return self.proxmox_api.cluster().resources().get(type="vm")
for vm in vms: except Exception as e:
vm["vmid"] = int(vm["vmid"]) self.module.fail_json(
vm["type"] = "qemu" msg="Failed to retrieve VMs information from cluster resources: %s" % e
if vmid is None: )
return vms
return [vm for vm in vms if vm["vmid"] == vmid] def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, node=None):
vms = []
for vm in vms_unfiltered:
if (
type != vm["type"]
or (node and vm["node"] != node)
or (vmid and int(vm["vmid"]) != vmid)
):
continue
vms.append(vm)
nodes = frozenset([vm["node"] for vm in vms])
for node in nodes:
if type == "qemu":
vms_from_nodes = self.proxmox_api.nodes(node).qemu().get()
else:
vms_from_nodes = self.proxmox_api.nodes(node).lxc().get()
for vmn in vms_from_nodes:
for vm in vms:
if int(vm["vmid"]) == int(vmn["vmid"]):
vm.update(vmn)
vm["vmid"] = int(vm["vmid"])
vm["template"] = proxmox_to_ansible_bool(vm["template"])
break
return vms
def get_qemu_vms(self, vms_unfiltered, vmid=None, node=None):
try:
return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, node)
except Exception as e: except Exception as e:
self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e) self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)
def get_lxc_vms(self, node, vmid=None): def get_lxc_vms(self, vms_unfiltered, vmid=None, node=None):
try: try:
vms = self.proxmox_api.nodes(node).lxc().get() return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, node)
for vm in vms:
vm["vmid"] = int(vm["vmid"])
if vmid is None:
return vms
return [vm for vm in vms if vm["vmid"] == vmid]
except Exception as e: except Exception as e:
self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e) self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
@@ -164,7 +195,7 @@ class ProxmoxVmInfoAnsible(ProxmoxAnsible):
def main(): def main():
module_args = proxmox_auth_argument_spec() module_args = proxmox_auth_argument_spec()
vm_info_args = dict( vm_info_args = dict(
node=dict(type="str", required=True), node=dict(type="str", required=False),
type=dict( type=dict(
type="str", choices=["lxc", "qemu", "all"], default="all", required=False type="str", choices=["lxc", "qemu", "all"], default="all", required=False
), ),
@@ -188,27 +219,32 @@ def main():
result = dict(changed=False) result = dict(changed=False)
if proxmox.get_node(node) is None: if node and proxmox.get_node(node) is None:
module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node) module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)
if not vmid and name: if not vmid and name:
vmid = int(proxmox.get_vmid(name, ignore_missing=False)) vmid = int(proxmox.get_vmid(name, ignore_missing=False))
vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
vms = None vms = None
if type == "lxc": if type == "lxc":
vms = proxmox.get_lxc_vms(node, vmid=vmid) vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
elif type == "qemu": elif type == "qemu":
vms = proxmox.get_qemu_vms(node, vmid=vmid) vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, node)
else: else:
vms = proxmox.get_qemu_vms(node, vmid=vmid) + proxmox.get_lxc_vms( vms = proxmox.get_qemu_vms(
node, vmid=vmid vms_cluster_resources, vmid, node
) ) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
if vms or vmid is None: if vms or vmid is None:
result["proxmox_vms"] = vms result["proxmox_vms"] = vms
module.exit_json(**result) module.exit_json(**result)
else: else:
result["msg"] = "VM with vmid %s doesn't exist on node %s" % (vmid, node) if node is None:
result["msg"] = "VM with vmid %s doesn't exist in cluster" % (vmid)
else:
result["msg"] = "VM with vmid %s doesn't exist on node %s" % (vmid, node)
module.fail_json(**result) module.fail_json(**result)

View file

@@ -28,107 +28,276 @@ from ansible_collections.community.general.tests.unit.plugins.modules.utils impo
) )
import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
NODE = "pve" NODE1 = "pve"
LXC_VMS = [ NODE2 = "pve2"
RAW_CLUSTER_OUTPUT = [
{ {
"uptime": 47, "cpu": 0.174069059487628,
"maxswap": 536870912,
"diskread": 0,
"name": "test-lxc.home.arpa",
"status": "running",
"vmid": "102",
"type": "lxc",
"swap": 0,
"cpus": 2,
"mem": 29134848,
"maxdisk": 10737418240,
"diskwrite": 0,
"netin": 35729,
"netout": 446,
"pid": 1412780,
"maxmem": 536870912,
"disk": 307625984,
"cpu": 0,
},
{
"netin": 0,
"netout": 0,
"cpu": 0,
"maxmem": 536870912,
"disk": 0, "disk": 0,
"name": "test1-lxc.home.arpa", "diskread": 6656,
"diskread": 0,
"status": "stopped",
"vmid": "103",
"type": "lxc",
"swap": 0,
"uptime": 0,
"maxswap": 536870912,
"diskwrite": 0, "diskwrite": 0,
"cpus": 2, "id": "qemu/100",
"mem": 0, "maxcpu": 1,
"maxdisk": 10737418240,
},
]
QEMU_VMS = [
{
"vmid": 101,
"diskread": 0,
"status": "stopped",
"name": "test1",
"uptime": 0,
"diskwrite": 0,
"cpus": 1,
"mem": 0,
"maxdisk": 0,
"netout": 0,
"netin": 0,
"cpu": 0,
"maxmem": 536870912,
"disk": 0,
},
{
"netout": 4113,
"netin": 22738,
"pid": 1947197,
"maxmem": 4294967296,
"disk": 0,
"cpu": 0.0795350949559682,
"uptime": 41,
"vmid": 100,
"status": "running",
"diskread": 0,
"name": "pxe.home.arpa",
"cpus": 1,
"mem": 35315629,
"maxdisk": 34359738368, "maxdisk": 34359738368,
"maxmem": 4294967296,
"mem": 35304543,
"name": "pxe.home.arpa",
"netin": 416956,
"netout": 17330,
"node": NODE1,
"status": "running",
"template": 0,
"type": "qemu",
"uptime": 669,
"vmid": 100,
},
{
"cpu": 0,
"disk": 0,
"diskread": 0,
"diskwrite": 0, "diskwrite": 0,
"id": "qemu/101",
"maxcpu": 1,
"maxdisk": 0,
"maxmem": 536870912,
"mem": 0,
"name": "test1",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"template": 0,
"type": "qemu",
"uptime": 0,
"vmid": 101,
},
{
"cpu": 0,
"disk": 352190464,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/102",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"mem": 28192768,
"name": "test-lxc.home.arpa",
"netin": 102757,
"netout": 446,
"node": NODE1,
"status": "running",
"template": 0,
"type": "lxc",
"uptime": 161,
"vmid": 102,
},
{
"cpu": 0,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/103",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"mem": 0,
"name": "test1-lxc.home.arpa",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"template": 0,
"type": "lxc",
"uptime": 0,
"vmid": 103,
},
]
RAW_LXC_OUTPUT = [
{
"cpu": 0,
"cpus": 2,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 0,
"name": "test1-lxc.home.arpa",
"netin": 0,
"netout": 0,
"status": "stopped",
"swap": 0,
"type": "lxc",
"uptime": 0,
"vmid": "103",
},
{
"cpu": 0,
"cpus": 2,
"disk": 352190464,
"diskread": 0,
"diskwrite": 0,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 28192768,
"name": "test-lxc.home.arpa",
"netin": 102757,
"netout": 446,
"pid": 4076752,
"status": "running",
"swap": 0,
"type": "lxc",
"uptime": 161,
"vmid": "102",
},
]
RAW_QEMU_OUTPUT = [
{
"cpu": 0,
"cpus": 1,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"maxdisk": 0,
"maxmem": 536870912,
"mem": 0,
"name": "test1",
"netin": 0,
"netout": 0,
"status": "stopped",
"uptime": 0,
"vmid": 101,
},
{
"cpu": 0.174069059487628,
"cpus": 1,
"disk": 0,
"diskread": 6656,
"diskwrite": 0,
"maxdisk": 34359738368,
"maxmem": 4294967296,
"mem": 35304543,
"name": "pxe.home.arpa",
"netin": 416956,
"netout": 17330,
"pid": 4076688,
"status": "running",
"uptime": 669,
"vmid": 100,
},
]
EXPECTED_VMS_OUTPUT = [
{
"cpu": 0.174069059487628,
"cpus": 1,
"disk": 0,
"diskread": 6656,
"diskwrite": 0,
"id": "qemu/100",
"maxcpu": 1,
"maxdisk": 34359738368,
"maxmem": 4294967296,
"mem": 35304543,
"name": "pxe.home.arpa",
"netin": 416956,
"netout": 17330,
"node": NODE1,
"pid": 4076688,
"status": "running",
"template": False,
"type": "qemu",
"uptime": 669,
"vmid": 100,
},
{
"cpu": 0,
"cpus": 1,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "qemu/101",
"maxcpu": 1,
"maxdisk": 0,
"maxmem": 536870912,
"mem": 0,
"name": "test1",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"template": False,
"type": "qemu",
"uptime": 0,
"vmid": 101,
},
{
"cpu": 0,
"cpus": 2,
"disk": 352190464,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/102",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 28192768,
"name": "test-lxc.home.arpa",
"netin": 102757,
"netout": 446,
"node": NODE1,
"pid": 4076752,
"status": "running",
"swap": 0,
"template": False,
"type": "lxc",
"uptime": 161,
"vmid": 102,
},
{
"cpu": 0,
"cpus": 2,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/103",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 0,
"name": "test1-lxc.home.arpa",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"swap": 0,
"template": False,
"type": "lxc",
"uptime": 0,
"vmid": 103,
}, },
] ]
def get_module_args(type="all", vmid=None, name=None): def get_module_args(type="all", node=None, vmid=None, name=None):
return { return {
"api_host": "host", "api_host": "host",
"api_user": "user", "api_user": "user",
"api_password": "password", "api_password": "password",
"node": NODE, "node": node,
"type": type, "type": type,
"vmid": vmid, "vmid": vmid,
"name": name, "name": name,
} }
def normalized_expected_vms_output(vms):
result = [vm.copy() for vm in vms]
for vm in result:
if "type" not in vm:
# response for QEMU VMs doesn't contain type field, adding it
vm["type"] = "qemu"
vm["vmid"] = int(vm["vmid"])
return result
class TestProxmoxVmInfoModule(ModuleTestCase): class TestProxmoxVmInfoModule(ModuleTestCase):
def setUp(self): def setUp(self):
super(TestProxmoxVmInfoModule, self).setUp() super(TestProxmoxVmInfoModule, self).setUp()
@@ -138,12 +307,15 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
"ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect", "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect",
).start() ).start()
self.connect_mock.return_value.nodes.return_value.lxc.return_value.get.return_value = ( self.connect_mock.return_value.nodes.return_value.lxc.return_value.get.return_value = (
LXC_VMS RAW_LXC_OUTPUT
) )
self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.return_value = ( self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.return_value = (
QEMU_VMS RAW_QEMU_OUTPUT
) )
self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE}] self.connect_mock.return_value.cluster.return_value.resources.return_value.get.return_value = (
RAW_CLUSTER_OUTPUT
)
self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE1}]
def tearDown(self): def tearDown(self):
self.connect_mock.stop() self.connect_mock.stop()
@@ -155,7 +327,7 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
self.module.main() self.module.main()
result = exc_info.value.args[0] result = exc_info.value.args[0]
assert result["msg"] == "missing required arguments: api_host, api_user, node" assert result["msg"] == "missing required arguments: api_host, api_user"
def test_get_lxc_vms_information(self): def test_get_lxc_vms_information(self):
with pytest.raises(AnsibleExitJson) as exc_info: with pytest.raises(AnsibleExitJson) as exc_info:
@@ -164,36 +336,34 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
result = exc_info.value.args[0] result = exc_info.value.args[0]
assert result["changed"] is False assert result["changed"] is False
assert result["proxmox_vms"] == LXC_VMS assert result["proxmox_vms"] == [
vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "lxc"
]
def test_get_qemu_vms_information(self): def test_get_qemu_vms_information(self):
with pytest.raises(AnsibleExitJson) as exc_info: with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = normalized_expected_vms_output(QEMU_VMS)
set_module_args(get_module_args(type="qemu")) set_module_args(get_module_args(type="qemu"))
self.module.main() self.module.main()
result = exc_info.value.args[0] result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output assert result["proxmox_vms"] == [
vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "qemu"
]
def test_get_all_vms_information(self): def test_get_all_vms_information(self):
with pytest.raises(AnsibleExitJson) as exc_info: with pytest.raises(AnsibleExitJson) as exc_info:
qemu_output = normalized_expected_vms_output(QEMU_VMS)
expected_output = qemu_output + LXC_VMS
set_module_args(get_module_args()) set_module_args(get_module_args())
self.module.main() self.module.main()
result = exc_info.value.args[0] result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output assert result["proxmox_vms"] == EXPECTED_VMS_OUTPUT
def test_vmid_is_converted_to_int(self): def test_vmid_is_converted_to_int(self):
with pytest.raises(AnsibleExitJson) as exc_info: with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = normalized_expected_vms_output(LXC_VMS)
set_module_args(get_module_args(type="lxc")) set_module_args(get_module_args(type="lxc"))
self.module.main() self.module.main()
result = exc_info.value.args[0] result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert isinstance(result["proxmox_vms"][0]["vmid"], int) assert isinstance(result["proxmox_vms"][0]["vmid"], int)
def test_get_specific_lxc_vm_information(self): def test_get_specific_lxc_vm_information(self):
@@ -201,8 +371,8 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
vmid = 102 vmid = 102
expected_output = [ expected_output = [
vm vm
for vm in normalized_expected_vms_output(LXC_VMS) for vm in EXPECTED_VMS_OUTPUT
if vm["vmid"] == vmid if vm["vmid"] == vmid and vm["type"] == "lxc"
] ]
set_module_args(get_module_args(type="lxc", vmid=vmid)) set_module_args(get_module_args(type="lxc", vmid=vmid))
self.module.main() self.module.main()
@@ -216,8 +386,8 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
vmid = 100 vmid = 100
expected_output = [ expected_output = [
vm vm
for vm in normalized_expected_vms_output(QEMU_VMS) for vm in EXPECTED_VMS_OUTPUT
if vm["vmid"] == vmid if vm["vmid"] == vmid and vm["type"] == "qemu"
] ]
set_module_args(get_module_args(type="qemu", vmid=vmid)) set_module_args(get_module_args(type="qemu", vmid=vmid))
self.module.main() self.module.main()
@@ -229,11 +399,7 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
def test_get_specific_vm_information(self): def test_get_specific_vm_information(self):
with pytest.raises(AnsibleExitJson) as exc_info: with pytest.raises(AnsibleExitJson) as exc_info:
vmid = 100 vmid = 100
expected_output = [ expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["vmid"] == vmid]
vm
for vm in normalized_expected_vms_output(QEMU_VMS + LXC_VMS)
if vm["vmid"] == vmid
]
set_module_args(get_module_args(type="all", vmid=vmid)) set_module_args(get_module_args(type="all", vmid=vmid))
self.module.main() self.module.main()
@@ -248,11 +414,7 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
] ]
with pytest.raises(AnsibleExitJson) as exc_info: with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [ expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
vm
for vm in normalized_expected_vms_output(QEMU_VMS + LXC_VMS)
if vm["name"] == name
]
set_module_args(get_module_args(type="all", name=name)) set_module_args(get_module_args(type="all", name=name))
self.module.main() self.module.main()
@@ -260,14 +422,65 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
assert result["proxmox_vms"] == expected_output assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 1 assert len(result["proxmox_vms"]) == 1
def test_get_all_lxc_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["node"] == NODE1 and vm["type"] == "lxc"
]
set_module_args(get_module_args(type="lxc", node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 1
def test_get_all_qemu_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["node"] == NODE1 and vm["type"] == "qemu"
]
set_module_args(get_module_args(type="qemu", node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 1
def test_get_all_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["node"] == NODE1
]
set_module_args(get_module_args(node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 2
def test_module_fail_when_vm_does_not_exist_on_node(self): def test_module_fail_when_vm_does_not_exist_on_node(self):
with pytest.raises(AnsibleFailJson) as exc_info:
vmid = 200
set_module_args(get_module_args(type="all", vmid=vmid, node=NODE1))
self.module.main()
result = exc_info.value.args[0]
assert result["msg"] == "VM with vmid 200 doesn't exist on node pve"
def test_module_fail_when_vm_does_not_exist_in_cluster(self):
with pytest.raises(AnsibleFailJson) as exc_info: with pytest.raises(AnsibleFailJson) as exc_info:
vmid = 200 vmid = 200
set_module_args(get_module_args(type="all", vmid=vmid)) set_module_args(get_module_args(type="all", vmid=vmid))
self.module.main() self.module.main()
result = exc_info.value.args[0] result = exc_info.value.args[0]
assert result["msg"] == "VM with vmid 200 doesn't exist on node pve" assert result["msg"] == "VM with vmid 200 doesn't exist in cluster"
def test_module_fail_when_qemu_request_fails(self): def test_module_fail_when_qemu_request_fails(self):
self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.side_effect = IOError( self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.side_effect = IOError(
@@ -291,10 +504,24 @@ class TestProxmoxVmInfoModule(ModuleTestCase):
result = exc_info.value.args[0] result = exc_info.value.args[0]
assert "Failed to retrieve LXC VMs information:" in result["msg"] assert "Failed to retrieve LXC VMs information:" in result["msg"]
def test_module_fail_when_cluster_resources_request_fails(self):
self.connect_mock.return_value.cluster.return_value.resources.return_value.get.side_effect = IOError(
"Some mocked connection error."
)
with pytest.raises(AnsibleFailJson) as exc_info:
set_module_args(get_module_args())
self.module.main()
result = exc_info.value.args[0]
assert (
"Failed to retrieve VMs information from cluster resources:"
in result["msg"]
)
def test_module_fail_when_node_does_not_exist(self): def test_module_fail_when_node_does_not_exist(self):
self.connect_mock.return_value.nodes.get.return_value = [] self.connect_mock.return_value.nodes.get.return_value = []
with pytest.raises(AnsibleFailJson) as exc_info: with pytest.raises(AnsibleFailJson) as exc_info:
set_module_args(get_module_args(type="all")) set_module_args(get_module_args(type="all", node=NODE1))
self.module.main() self.module.main()
result = exc_info.value.args[0] result = exc_info.value.args[0]