diff --git a/changelogs/fragments/6709-proxmox-create-vm-with-existing-name.yml b/changelogs/fragments/6709-proxmox-create-vm-with-existing-name.yml new file mode 100644 index 0000000000..5e79862ef1 --- /dev/null +++ b/changelogs/fragments/6709-proxmox-create-vm-with-existing-name.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox_kvm - allow creation of VM with existing name but new vmid (https://github.com/ansible-collections/community.general/issues/6155, https://github.com/ansible-collections/community.general/pull/6709). diff --git a/plugins/module_utils/proxmox.py b/plugins/module_utils/proxmox.py index 9f3a55cac0..47526e6b7b 100644 --- a/plugins/module_utils/proxmox.py +++ b/plugins/module_utils/proxmox.py @@ -107,19 +107,30 @@ class ProxmoxAnsible(object): self.module.fail_json(msg='%s' % e, exception=traceback.format_exc()) def version(self): - apireturn = self.proxmox_api.version.get() - return LooseVersion(apireturn['version']) + try: + apiversion = self.proxmox_api.version.get() + return LooseVersion(apiversion['version']) + except Exception as e: + self.module.fail_json(msg='Unable to retrieve Proxmox VE version: %s' % e) def get_node(self, node): - nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node] + try: + nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node] + except Exception as e: + self.module.fail_json(msg='Unable to retrieve Proxmox VE node: %s' % e) return nodes[0] if nodes else None def get_nextvmid(self): - vmid = self.proxmox_api.cluster.nextid.get() - return vmid + try: + return self.proxmox_api.cluster.nextid.get() + except Exception as e: + self.module.fail_json(msg='Unable to retrieve next free vmid: %s' % e) def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False): - vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name] + try: + vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name] + 
except Exception as e: + self.module.fail_json(msg='Unable to retrieve list of VMs filtered by name %s: %s' % (name, e)) if not vms: if ignore_missing: @@ -132,7 +143,10 @@ class ProxmoxAnsible(object): return vms[0] def get_vm(self, vmid, ignore_missing=False): - vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] + try: + vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] + except Exception as e: + self.module.fail_json(msg='Unable to retrieve list of VMs filtered by vmid %s: %s' % (vmid, e)) if vms: return vms[0] @@ -143,8 +157,11 @@ class ProxmoxAnsible(object): self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid) def api_task_ok(self, node, taskid): - status = self.proxmox_api.nodes(node).tasks(taskid).status.get() - return status['status'] == 'stopped' and status['exitstatus'] == 'OK' + try: + status = self.proxmox_api.nodes(node).tasks(taskid).status.get() + return status['status'] == 'stopped' and status['exitstatus'] == 'OK' + except Exception as e: + self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node, e)) def get_pool(self, poolid): """Retrieve pool information diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py index 351659232f..a4258a682d 100644 --- a/plugins/modules/proxmox_kvm.py +++ b/plugins/modules/proxmox_kvm.py @@ -1323,18 +1323,18 @@ def main(): module.fail_json(vmid=vmid, msg='Unable to migrate VM {0} from {1} to {2}: {3}'.format(vmid, vm_node, node, e)) if state == 'present': - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone): - module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) - elif proxmox.get_vmid(name, ignore_missing=True) and not (update or clone): - module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name) - elif not node: - 
module.fail_json(msg='node is mandatory for creating/updating VM') - elif update and not any([vmid, name]): - module.fail_json(msg='vmid or name is mandatory for updating VM') - elif not proxmox.get_node(node): - module.fail_json(msg="node '%s' does not exist in cluster" % node) + if not (update or clone) and proxmox.get_vm(vmid, ignore_missing=True): + module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) + elif not (update or clone or vmid) and proxmox.get_vmid(name, ignore_missing=True): + module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name) + elif not node: + module.fail_json(msg='node is mandatory for creating/updating VM') + elif update and not any([vmid, name]): + module.fail_json(msg='vmid or name is mandatory for updating VM') + elif not proxmox.get_node(node): + module.fail_json(msg="node '%s' does not exist in cluster" % node) + try: proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update, archive=module.params['archive'], acpi=module.params['acpi'], @@ -1405,12 +1405,6 @@ def main(): sata=module.params['sata'], scsi=module.params['scsi'], virtio=module.params['virtio']) - if update: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) - elif clone is not None: - module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) - else: - module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) except Exception as e: if update: module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) @@ -1419,11 +1413,19 @@ def main(): else: module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) + if update: + module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) + elif clone is not None: + 
module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) + else: + module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) + elif state == 'started': + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % name) + status = {} try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) vm = proxmox.get_vm(vmid) status['status'] = vm['status'] if vm['status'] == 'running': @@ -1435,11 +1437,11 @@ def main(): module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'stopped': + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % name) + status = {} try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = proxmox.get_vm(vmid) status['status'] = vm['status'] @@ -1452,11 +1454,11 @@ def main(): module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'restarted': + if not vmid: + module.fail_json(msg='VM with name = %s does not exist in cluster' % name) + status = {} try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = proxmox.get_vm(vmid) status['status'] = vm['status'] if vm['status'] == 'stopped': @@ -1471,6 +1473,7 @@ def main(): status = {} if not vmid: module.exit_json(changed=False, msg='VM with name = %s is already absent' % name) + try: vm = proxmox.get_vm(vmid, ignore_missing=True) if not vm: diff --git a/tests/unit/plugins/modules/test_proxmox_kvm.py b/tests/unit/plugins/modules/test_proxmox_kvm.py index 5311851027..c86d804d71 100644 --- a/tests/unit/plugins/modules/test_proxmox_kvm.py +++ b/tests/unit/plugins/modules/test_proxmox_kvm.py @@ -4,17 +4,91 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -from ansible_collections.community.general.plugins.modules.proxmox_kvm import parse_dev, parse_mac +import pytest + +from ansible_collections.community.general.plugins.modules import proxmox_kvm +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) +import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils -def test_parse_mac(): - assert parse_mac('virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1') == '00:11:22:AA:BB:CC' +class TestProxmoxKvmModule(ModuleTestCase): + def setUp(self): + super(TestProxmoxKvmModule, self).setUp() + proxmox_utils.HAS_PROXMOXER = True + self.module = proxmox_kvm + self.connect_mock = patch( + "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect" + ) + self.connect_mock.start() + def tearDown(self): + self.connect_mock.stop() + super(TestProxmoxKvmModule, self).tearDown() -def test_parse_dev(): - assert parse_dev('local-lvm:vm-1000-disk-0,format=qcow2') == 'local-lvm:vm-1000-disk-0' - assert parse_dev('local-lvm:vm-101-disk-1,size=8G') == 'local-lvm:vm-101-disk-1' - assert parse_dev('local-zfs:vm-1001-disk-0') == 'local-zfs:vm-1001-disk-0' + def test_module_fail_when_required_args_missing(self): + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_module_exits_unchanged_when_provided_vmid_exists(self): + set_module_args( + { + "api_host": "host", + "api_user": "user", + "api_password": "password", + "vmid": "100", + "node": "pve", + } + ) + with patch.object(proxmox_utils.ProxmoxAnsible, "get_vm") 
as get_vm_mock: + get_vm_mock.return_value = [{"vmid": "100"}] + with pytest.raises(AnsibleExitJson) as exc_info: + self.module.main() + + assert get_vm_mock.call_count == 1 + result = exc_info.value.args[0] + assert result["changed"] is False + assert result["msg"] == "VM with vmid <100> already exists" + + @patch.object(proxmox_kvm.ProxmoxKvmAnsible, "create_vm") + def test_vm_created_when_vmid_not_exist_but_name_already_exist(self, create_vm_mock): + set_module_args( + { + "api_host": "host", + "api_user": "user", + "api_password": "password", + "vmid": "100", + "name": "existing.vm.local", + "node": "pve", + } + ) + with patch.object(proxmox_utils.ProxmoxAnsible, "get_vm") as get_vm_mock: + with patch.object(proxmox_utils.ProxmoxAnsible, "get_node") as get_node_mock: + get_vm_mock.return_value = None + get_node_mock.return_value = {"node": "pve", "status": "online"} + with pytest.raises(AnsibleExitJson) as exc_info: + self.module.main() + + assert get_vm_mock.call_count == 1 + assert get_node_mock.call_count == 1 + result = exc_info.value.args[0] + assert result["changed"] is True + assert result["msg"] == "VM existing.vm.local with vmid 100 deployed" + + def test_parse_mac(self): + assert proxmox_kvm.parse_mac("virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1") == "00:11:22:AA:BB:CC" + + def test_parse_dev(self): + assert proxmox_kvm.parse_dev("local-lvm:vm-1000-disk-0,format=qcow2") == "local-lvm:vm-1000-disk-0" + assert proxmox_kvm.parse_dev("local-lvm:vm-101-disk-1,size=8G") == "local-lvm:vm-101-disk-1" + assert proxmox_kvm.parse_dev("local-zfs:vm-1001-disk-0") == "local-zfs:vm-1001-disk-0"