[PR #6709/fb04dc3d backport][stable-7] proxmox_kvm - Allow creation of VM with existing name but new vmid (#6729)
proxmox_kvm - Allow creation of VM with existing name but new vmid (#6709)
* proxmox_kvm - Allow creation of VM with existing name but new vmid
* Fix pylint and pep8 errors
* Add changelog fragment
* Move status variable outside of try block
* Add assertion for calling get_vm_node function
* Use try/catch for module_utils functions
* Update changelogs/fragments/6709-proxmox-create-vm-with-existing-name.yml
Co-authored-by: Felix Fontein <felix@fontein.de>
---------
Co-authored-by: Felix Fontein <felix@fontein.de>
(cherry picked from commit fb04dc3db2)
Co-authored-by: Sergei Antipov <greendayonfire@gmail.com>
commit 1592be779a (parent e261332acf)
4 changed files with 138 additions and 42 deletions
changelogs/fragments/6709-proxmox-create-vm-with-existing-name.yml
@@ -0,0 +1,2 @@
+bugfixes:
+  - proxmox_kvm - allow creation of VM with existing name but new vmid (https://github.com/ansible-collections/community.general/issues/6155, https://github.com/ansible-collections/community.general/pull/6709).
plugins/module_utils/proxmox.py
@@ -107,19 +107,30 @@ class ProxmoxAnsible(object):
             self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
 
     def version(self):
-        apireturn = self.proxmox_api.version.get()
-        return LooseVersion(apireturn['version'])
+        try:
+            apiversion = self.proxmox_api.version.get()
+            return LooseVersion(apiversion['version'])
+        except Exception as e:
+            self.module.fail_json(msg='Unable to retrieve Proxmox VE version: %s' % e)
 
     def get_node(self, node):
-        nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
+        try:
+            nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
+        except Exception as e:
+            self.module.fail_json(msg='Unable to retrieve Proxmox VE node: %s' % e)
         return nodes[0] if nodes else None
 
     def get_nextvmid(self):
-        vmid = self.proxmox_api.cluster.nextid.get()
-        return vmid
+        try:
+            return self.proxmox_api.cluster.nextid.get()
+        except Exception as e:
+            self.module.fail_json(msg='Unable to retrieve next free vmid: %s' % e)
 
     def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False):
-        vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
+        try:
+            vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
+        except Exception as e:
+            self.module.fail_json(msg='Unable to retrieve list of VMs filtered by name %s: %s' % (name, e))
 
         if not vms:
             if ignore_missing:
@@ -132,7 +143,10 @@ class ProxmoxAnsible(object):
         return vms[0]
 
     def get_vm(self, vmid, ignore_missing=False):
-        vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+        try:
+            vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+        except Exception as e:
+            self.module.fail_json(msg='Unable to retrieve list of VMs filtered by vmid %s: %s' % (vmid, e))
+
         if vms:
             return vms[0]
@@ -143,8 +157,11 @@ class ProxmoxAnsible(object):
             self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
 
     def api_task_ok(self, node, taskid):
-        status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
-        return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
+        try:
+            status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
+            return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
+        except Exception as e:
+            self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node, e))
 
     def get_pool(self, poolid):
         """Retrieve pool information
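
Each of the module_utils changes above applies the same defensive pattern: the proxmoxer call is wrapped in try/except and any API error is converted into a module failure. A minimal standalone sketch of that pattern, with a hypothetical FakeModule standing in for AnsibleModule (not the collection's actual classes):

# Sketch of the try/except + fail_json pattern used in the hunks above.
# FakeModule is a stand-in: the real AnsibleModule.fail_json also never
# returns, so the line after the except block only runs on success.
class FakeModule:
    def fail_json(self, **kwargs):
        raise SystemExit(kwargs.get('msg'))

def get_node(module, api, node):
    try:
        nodes = [n for n in api.nodes.get() if n['node'] == node]
    except Exception as e:  # proxmoxer raises on HTTP/auth/connection errors
        module.fail_json(msg='Unable to retrieve Proxmox VE node: %s' % e)
    return nodes[0] if nodes else None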
plugins/modules/proxmox_kvm.py
@@ -1323,18 +1323,18 @@ def main():
             module.fail_json(vmid=vmid, msg='Unable to migrate VM {0} from {1} to {2}: {3}'.format(vmid, vm_node, node, e))
 
     if state == 'present':
-        try:
-            if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone):
-                module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
-            elif proxmox.get_vmid(name, ignore_missing=True) and not (update or clone):
-                module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name)
-            elif not node:
-                module.fail_json(msg='node is mandatory for creating/updating VM')
-            elif update and not any([vmid, name]):
-                module.fail_json(msg='vmid or name is mandatory for updating VM')
-            elif not proxmox.get_node(node):
-                module.fail_json(msg="node '%s' does not exist in cluster" % node)
+        if not (update or clone) and proxmox.get_vm(vmid, ignore_missing=True):
+            module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
+        elif not (update or clone or vmid) and proxmox.get_vmid(name, ignore_missing=True):
+            module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name)
+        elif not node:
+            module.fail_json(msg='node is mandatory for creating/updating VM')
+        elif update and not any([vmid, name]):
+            module.fail_json(msg='vmid or name is mandatory for updating VM')
+        elif not proxmox.get_node(node):
+            module.fail_json(msg="node '%s' does not exist in cluster" % node)
 
+        try:
             proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update,
                               archive=module.params['archive'],
                               acpi=module.params['acpi'],
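
The hunk above is the heart of the fix: the duplicate-name check now also requires that no explicit vmid was supplied, so a request that names an existing VM but carries a fresh vmid falls through to creation instead of exiting early. A minimal sketch of the reordered decision logic, using plain booleans instead of the module's get_vm/get_vmid helpers (an illustration only, not the module's actual code):

# Simplified stand-in for the checks in main(); the real code calls
# module.exit_json/fail_json and the ProxmoxAnsible helpers instead.
def decide(vmid, update, clone, vm_exists_by_id, vm_exists_by_name):
    if not (update or clone) and vm_exists_by_id:
        return "unchanged: vmid already exists"
    # New in #6709: the name check is skipped whenever a vmid is given,
    # so an existing name plus a new vmid proceeds to creation.
    if not (update or clone or vmid) and vm_exists_by_name:
        return "unchanged: name already exists"
    return "create VM"

# Before this change the second condition ignored vmid and returned
# "unchanged"; now the same input leads to a new VM being created.
print(decide(vmid=101, update=False, clone=None,
             vm_exists_by_id=False, vm_exists_by_name=True))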
@@ -1405,12 +1405,6 @@ def main():
                               sata=module.params['sata'],
                               scsi=module.params['scsi'],
                               virtio=module.params['virtio'])
-            if update:
-                module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
-            elif clone is not None:
-                module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
-            else:
-                module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
         except Exception as e:
             if update:
                 module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
@@ -1419,11 +1413,19 @@ def main():
             else:
                 module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
 
+        if update:
+            module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
+        elif clone is not None:
+            module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
+        else:
+            module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+
     elif state == 'started':
+        if not vmid:
+            module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
         status = {}
         try:
-            if not vmid:
-                module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
             vm = proxmox.get_vm(vmid)
             status['status'] = vm['status']
             if vm['status'] == 'running':
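
The two hunks above also move the success exit_json calls out of the try block and the not-vmid guard ahead of it. One motivation consistent with the new unit tests below (an inference, not stated in the commit message): the test harness replaces exit_json/fail_json with helpers that raise AnsibleExitJson/AnsibleFailJson, and a success exit raised inside a broad `except Exception` would be swallowed and reported as a failure. A small self-contained illustration:

# Illustration only; AnsibleExitJson here is a local stand-in for the
# helper imported from tests.unit.plugins.modules.utils in the new tests.
class AnsibleExitJson(Exception):
    pass

def exit_json(**kwargs):
    raise AnsibleExitJson(kwargs)

def exit_inside_try():
    try:
        exit_json(changed=True)      # success signal raised inside the try...
    except Exception as e:           # ...and swallowed by the broad handler
        return 'misreported as failure: %r' % e

def exit_after_try():
    try:
        pass                         # only the risky API call stays in the try
    except Exception as e:
        return 'failure: %r' % e
    exit_json(changed=True)          # success exit is no longer caught

print(exit_inside_try())
try:
    exit_after_try()
except AnsibleExitJson as e:
    print('clean exit:', e.args[0])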
@@ -1435,11 +1437,11 @@ def main():
             module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status)
 
     elif state == 'stopped':
+        if not vmid:
+            module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
         status = {}
         try:
-            if not vmid:
-                module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
             vm = proxmox.get_vm(vmid)
 
             status['status'] = vm['status']
@@ -1452,11 +1454,11 @@ def main():
             module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status)
 
     elif state == 'restarted':
+        if not vmid:
+            module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
         status = {}
         try:
-            if not vmid:
-                module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
             vm = proxmox.get_vm(vmid)
             status['status'] = vm['status']
             if vm['status'] == 'stopped':
@@ -1471,6 +1473,7 @@ def main():
         status = {}
         if not vmid:
             module.exit_json(changed=False, msg='VM with name = %s is already absent' % name)
+
         try:
             vm = proxmox.get_vm(vmid, ignore_missing=True)
             if not vm:
tests/unit/plugins/modules/test_proxmox_kvm.py
@@ -4,17 +4,91 @@
 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
 
 __metaclass__ = type
 
-from ansible_collections.community.general.plugins.modules.proxmox_kvm import parse_dev, parse_mac
+import pytest
+
+from ansible_collections.community.general.plugins.modules import proxmox_kvm
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+    ModuleTestCase,
+    set_module_args,
+)
+import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
 
 
-def test_parse_mac():
-    assert parse_mac('virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1') == '00:11:22:AA:BB:CC'
+class TestProxmoxKvmModule(ModuleTestCase):
+    def setUp(self):
+        super(TestProxmoxKvmModule, self).setUp()
+        proxmox_utils.HAS_PROXMOXER = True
+        self.module = proxmox_kvm
+        self.connect_mock = patch(
+            "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect"
+        )
+        self.connect_mock.start()
 
+    def tearDown(self):
+        self.connect_mock.stop()
+        super(TestProxmoxKvmModule, self).tearDown()
 
-def test_parse_dev():
-    assert parse_dev('local-lvm:vm-1000-disk-0,format=qcow2') == 'local-lvm:vm-1000-disk-0'
-    assert parse_dev('local-lvm:vm-101-disk-1,size=8G') == 'local-lvm:vm-101-disk-1'
-    assert parse_dev('local-zfs:vm-1001-disk-0') == 'local-zfs:vm-1001-disk-0'
+    def test_module_fail_when_required_args_missing(self):
+        with self.assertRaises(AnsibleFailJson):
+            set_module_args({})
+            self.module.main()
+
+    def test_module_exits_unchaged_when_provided_vmid_exists(self):
+        set_module_args(
+            {
+                "api_host": "host",
+                "api_user": "user",
+                "api_password": "password",
+                "vmid": "100",
+                "node": "pve",
+            }
+        )
+        with patch.object(proxmox_utils.ProxmoxAnsible, "get_vm") as get_vm_mock:
+            get_vm_mock.return_value = [{"vmid": "100"}]
+            with pytest.raises(AnsibleExitJson) as exc_info:
+                self.module.main()
+
+        assert get_vm_mock.call_count == 1
+        result = exc_info.value.args[0]
+        assert result["changed"] is False
+        assert result["msg"] == "VM with vmid <100> already exists"
+
+    @patch.object(proxmox_kvm.ProxmoxKvmAnsible, "create_vm")
+    def test_vm_created_when_vmid_not_exist_but_name_already_exist(self, create_vm_mock):
+        set_module_args(
+            {
+                "api_host": "host",
+                "api_user": "user",
+                "api_password": "password",
+                "vmid": "100",
+                "name": "existing.vm.local",
+                "node": "pve",
+            }
+        )
+        with patch.object(proxmox_utils.ProxmoxAnsible, "get_vm") as get_vm_mock:
+            with patch.object(proxmox_utils.ProxmoxAnsible, "get_node") as get_node_mock:
+                get_vm_mock.return_value = None
+                get_node_mock.return_value = {"node": "pve", "status": "online"}
+                with pytest.raises(AnsibleExitJson) as exc_info:
+                    self.module.main()
+
+        assert get_vm_mock.call_count == 1
+        assert get_node_mock.call_count == 1
+        result = exc_info.value.args[0]
+        assert result["changed"] is True
+        assert result["msg"] == "VM existing.vm.local with vmid 100 deployed"
+
+    def test_parse_mac(self):
+        assert proxmox_kvm.parse_mac("virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1") == "00:11:22:AA:BB:CC"
+
+    def test_parse_dev(self):
+        assert proxmox_kvm.parse_dev("local-lvm:vm-1000-disk-0,format=qcow2") == "local-lvm:vm-1000-disk-0"
+        assert proxmox_kvm.parse_dev("local-lvm:vm-101-disk-1,size=8G") == "local-lvm:vm-101-disk-1"
+        assert proxmox_kvm.parse_dev("local-zfs:vm-1001-disk-0") == "local-zfs:vm-1001-disk-0"
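
To try the new test class locally, one option (a sketch; it assumes a checkout of the collection laid out as an ansible_collections tree, with pytest and the unit-test requirements installed) is to point pytest at the file; the collection's CI normally runs these via ansible-test units instead.

# Hypothetical local invocation; the path assumes the repository root.
import pytest

pytest.main(["tests/unit/plugins/modules/test_proxmox_kvm.py", "-v"])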