Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)
[feat] proxmox_snap: snapshot containers with configured mountpoints (#5274)
* module_utils.proxmox: new `api_task_ok` helper + integrated with existing modules
* proxmox_snap: add `unbind` param to snapshot containers with mountpoints
* [fix] errors reported by 'test sanity pep8' at https://github.com/ansible-collections/community.general/pull/5274#issuecomment-1242932079
* module_utils.proxmox.api_task_ok: small improvement
* proxmox_snap.unbind: version_added, formatting errors, changelog fragment
* Apply suggestions from code review
* proxmox_snap.unbind: update version_added tag

Co-authored-by: Felix Fontein <felix@fontein.de>
parent f3bcfa5a75
commit 25e3031c2f
6 changed files with 117 additions and 22 deletions

@@ -0,0 +1,3 @@
+minor_changes:
+  - proxmox_snap - add ``unbind`` param to support snapshotting containers with configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274).
+  - proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274).

@@ -137,3 +137,7 @@ class ProxmoxAnsible(object):
                 return None
 
             self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
+
+    def api_task_ok(self, node, taskid):
+        status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
+        return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
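
The remaining hunks all rewrite the same polling pattern around this helper. As a standalone sketch (not part of the commit; it inlines the same check the helper performs, assumes a reachable Proxmox host, and reuses the illustrative node, vmid and credentials from the module examples further down), the loop looks roughly like this:

import time
from proxmoxer import ProxmoxAPI

def wait_for_task(api, node, taskid, timeout=30):
    # Same condition api_task_ok encapsulates: task stopped with exit status OK.
    while timeout:
        status = api.nodes(node).tasks(taskid).status.get()
        if status['status'] == 'stopped' and status['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            return False
        time.sleep(1)

# Illustrative values only; a real node name and task UPID are required.
api = ProxmoxAPI('node1', user='root@pam', password='1q2w3e', verify_ssl=False)
taskid = api.nodes('node1').lxc(100).snapshot.post(snapname='pre-updates')
print(wait_for_task(api, 'node1', taskid))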

@@ -482,8 +482,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
         taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
 
         while timeout:
-            if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
-                    proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
+            if self.api_task_ok(node, taskid):
                 return True
             timeout -= 1
             if timeout == 0:

@@ -496,8 +495,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
     def start_instance(self, vm, vmid, timeout):
         taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
         while timeout:
-            if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
-                    self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+            if self.api_task_ok(vm['node'], taskid):
                 return True
             timeout -= 1
             if timeout == 0:

@@ -513,8 +511,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
         else:
             taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
         while timeout:
-            if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
-                    self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+            if self.api_task_ok(vm['node'], taskid):
                 return True
             timeout -= 1
             if timeout == 0:

@@ -527,8 +524,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
     def umount_instance(self, vm, vmid, timeout):
         taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
         while timeout:
-            if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
-                    self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+            if self.api_task_ok(vm['node'], taskid):
                 return True
             timeout -= 1
             if timeout == 0:

@@ -775,8 +771,7 @@ def main():
             taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
 
             while timeout:
-                task_status = proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
-                if (task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK'):
+                if proxmox.api_task_ok(vm['node'], taskid):
                     module.exit_json(changed=True, msg="VM %s removed" % vmid)
                 timeout -= 1
                 if timeout == 0:

@@ -866,8 +866,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
         timeout = self.module.params['timeout']
 
         while timeout:
-            task = self.proxmox_api.nodes(node).tasks(taskid).status.get()
-            if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
+            if self.api_task_ok(node, taskid):
                 # Wait an extra second as the API can be a ahead of the hypervisor
                 time.sleep(1)
                 return True

@@ -38,6 +38,17 @@ options:
       - For removal from config file, even if removing disk snapshot fails.
     default: false
     type: bool
+  unbind:
+    description:
+      - This option only applies to LXC containers.
+      - Allows to snapshot a container even if it has configured mountpoints.
+      - Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration.
+      - If running, the container will be stopped and restarted to apply config changes.
+      - Due to restrictions in the Proxmox API this option can only be used authenticating as C(root@pam) with I(api_password), API tokens do not work either.
+      - See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details.
+    default: false
+    type: bool
+    version_added: 5.7.0
   vmstate:
     description:
       - Snapshot includes RAM.
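
To make the documented unbind behaviour concrete, here is a rough proxmoxer sketch (not part of the commit) of what the flow amounts to when done directly against the Proxmox API; host, vmid and credentials are the illustrative values from the examples below, and the stop/restart of a running container is omitted for brevity:

from proxmoxer import ProxmoxAPI

px = ProxmoxAPI('node1', user='root@pam', password='1q2w3e', verify_ssl=False)
lxc = px.nodes('node1').lxc(100)

cfg = lxc.config.get()
mountpoints = {k: v for k, v in cfg.items() if k.startswith('mp')}  # mp0, mp1, ...

# Temporarily drop the mountpoints from the config (root@pam + password required),
# take the snapshot, then restore the original entries.
lxc.config.put(delete=' '.join(mountpoints))
lxc.snapshot.post(snapname='pre-updates')
lxc.config.put(**mountpoints)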

@@ -78,6 +89,16 @@ EXAMPLES = r'''
     state: present
     snapname: pre-updates
 
+- name: Create new snapshot for a container with configured mountpoints
+  community.general.proxmox_snap:
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    vmid: 100
+    state: present
+    unbind: true # requires root@pam+password auth, API tokens are not supported
+    snapname: pre-updates
+
 - name: Remove container snapshot
   community.general.proxmox_snap:
     api_user: root@pam

@@ -110,17 +131,89 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
     def snapshot(self, vm, vmid):
         return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot
 
-    def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate):
+    def vmconfig(self, vm, vmid):
+        return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).config
+
+    def vmstatus(self, vm, vmid):
+        return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).status
+
+    def _container_mp_get(self, vm, vmid):
+        cfg = self.vmconfig(vm, vmid).get()
+        mountpoints = {}
+        for key, value in cfg.items():
+            if key.startswith('mp'):
+                mountpoints[key] = value
+        return mountpoints
+
+    def _container_mp_disable(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
+        # shutdown container if running
+        if vmstatus == 'running':
+            self.shutdown_instance(vm, vmid, timeout)
+        # delete all mountpoints configs
+        self.vmconfig(vm, vmid).put(delete=' '.join(mountpoints))
+
+    def _container_mp_restore(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
+        # NOTE: requires auth as `root@pam`, API tokens are not supported
+        # see https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config
+        # restore original config
+        self.vmconfig(vm, vmid).put(**mountpoints)
+        # start container (if was running before snap)
+        if vmstatus == 'running':
+            self.start_instance(vm, vmid, timeout)
+
+    def start_instance(self, vm, vmid, timeout):
+        taskid = self.vmstatus(vm, vmid).start.post()
+        while timeout:
+            if self.api_task_ok(vm['node'], taskid):
+                return True
+            timeout -= 1
+            if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
+                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+            time.sleep(1)
+        return False
+
+    def shutdown_instance(self, vm, vmid, timeout):
+        taskid = self.vmstatus(vm, vmid).shutdown.post()
+        while timeout:
+            if self.api_task_ok(vm['node'], taskid):
+                return True
+            timeout -= 1
+            if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
+                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+            time.sleep(1)
+        return False
+
+    def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind):
         if self.module.check_mode:
             return True
 
         if vm['type'] == 'lxc':
+            if unbind is True:
+                # check if credentials will work
+                # WARN: it is crucial this check runs here!
+                # The correct permissions are required only to reconfig mounts.
+                # Not checking now would allow to remove the configuration BUT
+                # fail later, leaving the container in a misconfigured state.
+                if (
+                    self.module.params['api_user'] != 'root@pam'
+                    or not self.module.params['api_password']
+                ):
+                    self.module.fail_json(msg='`unbind=True` requires authentication as `root@pam` with `api_password`, API tokens are not supported.')
+                    return False
+                mountpoints = self._container_mp_get(vm, vmid)
+                vmstatus = self.vmstatus(vm, vmid).current().get()['status']
+                if mountpoints:
+                    self._container_mp_disable(vm, vmid, timeout, unbind, mountpoints, vmstatus)
             taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description)
         else:
             taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate))
 
         while timeout:
-            status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
-            if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
+            if self.api_task_ok(vm['node'], taskid):
+                if vm['type'] == 'lxc' and unbind is True and mountpoints:
+                    self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
                 return True
             if timeout == 0:
                 self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' %
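
For reference, `_container_mp_get` above simply filters the container config for `mpN` keys. With a hypothetical container that has one volume and one bind mountpoint, the dict that gets deleted before the snapshot and PUT back afterwards would look roughly like this (values are illustrative, not taken from the commit):

# Hypothetical pct.conf-style entries as returned by the config GET endpoint:
config = {
    'hostname': 'ct100',
    'rootfs': 'local-lvm:vm-100-disk-0,size=8G',
    'mp0': 'local-lvm:vm-100-disk-1,mp=/mnt/data,size=32G',
    'mp1': '/srv/shared,mp=/mnt/shared',
}
mountpoints = {k: v for k, v in config.items() if k.startswith('mp')}
# -> {'mp0': 'local-lvm:vm-100-disk-1,mp=/mnt/data,size=32G', 'mp1': '/srv/shared,mp=/mnt/shared'}
print(mountpoints)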

@@ -128,6 +221,8 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
 
             time.sleep(1)
             timeout -= 1
+        if vm['type'] == 'lxc' and unbind is True and mountpoints:
+            self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
         return False
 
     def snapshot_remove(self, vm, vmid, timeout, snapname, force):

@@ -136,8 +231,7 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
 
         taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force))
         while timeout:
-            status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
-            if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
+            if self.api_task_ok(vm['node'], taskid):
                 return True
             if timeout == 0:
                 self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' %

@@ -153,8 +247,7 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
 
         taskid = self.snapshot(vm, vmid)(snapname).post("rollback")
         while timeout:
-            status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
-            if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK':
+            if self.api_task_ok(vm['node'], taskid):
                 return True
             if timeout == 0:
                 self.module.fail_json(msg='Reached timeout while waiting for rolling back VM snapshot. Last line in task before timeout: %s' %

@@ -175,6 +268,7 @@ def main():
         description=dict(type='str'),
         snapname=dict(type='str', default='ansible_snap'),
         force=dict(type='bool', default=False),
+        unbind=dict(type='bool', default=False),
         vmstate=dict(type='bool', default=False),
     )
     module_args.update(snap_args)

@@ -193,6 +287,7 @@ def main():
     snapname = module.params['snapname']
     timeout = module.params['timeout']
     force = module.params['force']
+    unbind = module.params['unbind']
    vmstate = module.params['vmstate']
 
     # If hostname is set get the VM id from ProxmoxAPI

@@ -209,7 +304,7 @@ def main():
                 if i['name'] == snapname:
                     module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
 
-        if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate):
+        if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind):
             if module.check_mode:
                 module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
             else:

@@ -131,8 +131,7 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible):
         Check the task status and wait until the task is completed or the timeout is reached.
         """
         while timeout:
-            task_status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
-            if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
+            if self.api_task_ok(node, taskid):
                 return True
             timeout = timeout - 1
             if timeout == 0: