mirror of
https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00
Consolidate to one module and use new arg spec
This commit is contained in:
parent
7b65afa83e
commit
3170180049
2 changed files with 115 additions and 901 deletions
|
@ -17,10 +17,13 @@
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
---
|
---
|
||||||
module: vmware_deploy_template
|
module: vmware_guest
|
||||||
short_description: Deploy a template to a new virtualmachine in vcenter
|
short_description: Manages virtualmachines in vcenter
|
||||||
description:
|
description:
|
||||||
- Uses the pyvmomi Clone() method to copy a template to a new virtualmachine in vcenter
|
- Uses pyvmomi to ...
|
||||||
|
- copy a template to a new virtualmachine
|
||||||
|
- poweron/poweroff/restart a virtualmachine
|
||||||
|
- remove a virtualmachine
|
||||||
version_added: 2.2
|
version_added: 2.2
|
||||||
author: James Tanner (@jctanner) <tanner.jc@gmail.com>
|
author: James Tanner (@jctanner) <tanner.jc@gmail.com>
|
||||||
notes:
|
notes:
|
||||||
|
@ -29,30 +32,43 @@ requirements:
|
||||||
- "python >= 2.6"
|
- "python >= 2.6"
|
||||||
- PyVmomi
|
- PyVmomi
|
||||||
options:
|
options:
|
||||||
guest:
|
state:
|
||||||
|
description:
|
||||||
|
- What state should the virtualmachine be in?
|
||||||
|
required: True
|
||||||
|
choices: ['present', 'absent', 'poweredon', 'poweredoff', 'restarted', 'suspended']
|
||||||
|
name:
|
||||||
description:
|
description:
|
||||||
- Name of the newly deployed guest
|
- Name of the newly deployed guest
|
||||||
required: True
|
required: True
|
||||||
|
name_match:
|
||||||
|
description:
|
||||||
|
- If multiple vms matching the name, use the first or last found
|
||||||
|
required: False
|
||||||
|
default: 'first'
|
||||||
|
choices: ['first', 'last']
|
||||||
|
uuid:
|
||||||
|
description:
|
||||||
|
- UUID of the instance to manage if known, this is vmware's unique identifier.
|
||||||
|
- This is required if name is not supplied.
|
||||||
|
required: False
|
||||||
template:
|
template:
|
||||||
description:
|
description:
|
||||||
- Name of the template to deploy
|
- Name of the template to deploy, if needed to create the guest (state=present).
|
||||||
required: True
|
- If the guest exists already this setting will be ignored.
|
||||||
vm_folder:
|
required: False
|
||||||
|
folder:
|
||||||
description:
|
description:
|
||||||
- Destination folder path for the new guest
|
- Destination folder path for the new guest
|
||||||
required: False
|
required: False
|
||||||
vm_hardware:
|
hardware:
|
||||||
description:
|
description:
|
||||||
- Attributes such as cpus, memroy, osid, and disk controller
|
- Attributes such as cpus, memroy, osid, and disk controller
|
||||||
required: False
|
required: False
|
||||||
vm_nic:
|
nic:
|
||||||
description:
|
description:
|
||||||
- A list of nics to add
|
- A list of nics to add
|
||||||
required: True
|
required: True
|
||||||
power_on_after_clone:
|
|
||||||
description:
|
|
||||||
- Poweron the VM after it is cloned
|
|
||||||
required: False
|
|
||||||
wait_for_ip_address:
|
wait_for_ip_address:
|
||||||
description:
|
description:
|
||||||
- Wait until vcenter detects an IP address for the guest
|
- Wait until vcenter detects an IP address for the guest
|
||||||
|
@ -61,7 +77,7 @@ options:
|
||||||
description:
|
description:
|
||||||
- Ignore warnings and complete the actions
|
- Ignore warnings and complete the actions
|
||||||
required: False
|
required: False
|
||||||
datacenter_name:
|
datacenter:
|
||||||
description:
|
description:
|
||||||
- Destination datacenter for the deploy operation
|
- Destination datacenter for the deploy operation
|
||||||
required: True
|
required: True
|
||||||
|
@ -75,30 +91,30 @@ extends_documentation_fragment: vmware.documentation
|
||||||
EXAMPLES = '''
|
EXAMPLES = '''
|
||||||
Example from Ansible playbook
|
Example from Ansible playbook
|
||||||
- name: create the VM
|
- name: create the VM
|
||||||
vmware_deploy_template:
|
vmware_guest:
|
||||||
validate_certs: False
|
validate_certs: False
|
||||||
hostname: 192.168.1.209
|
hostname: 192.168.1.209
|
||||||
username: administrator@vsphere.local
|
username: administrator@vsphere.local
|
||||||
password: vmware
|
password: vmware
|
||||||
guest: testvm_2
|
name: testvm_2
|
||||||
vm_folder: testvms
|
state: poweredon
|
||||||
vm_disk:
|
folder: testvms
|
||||||
|
disk:
|
||||||
- size_gb: 10
|
- size_gb: 10
|
||||||
type: thin
|
type: thin
|
||||||
datastore: g73_datastore
|
datastore: g73_datastore
|
||||||
vm_nic:
|
nic:
|
||||||
- type: vmxnet3
|
- type: vmxnet3
|
||||||
network: VM Network
|
network: VM Network
|
||||||
network_type: standard
|
network_type: standard
|
||||||
vm_hardware:
|
hardware:
|
||||||
memory_mb: 512
|
memory_mb: 512
|
||||||
num_cpus: 1
|
num_cpus: 1
|
||||||
osid: centos64guest
|
osid: centos64guest
|
||||||
scsi: paravirtual
|
scsi: paravirtual
|
||||||
datacenter_name: datacenter1
|
datacenter: datacenter1
|
||||||
esxi_hostname: 192.168.1.117
|
esxi_hostname: 192.168.1.117
|
||||||
template_src: template_el7
|
template: template_el7
|
||||||
power_on_after_clone: yes
|
|
||||||
wait_for_ip_address: yes
|
wait_for_ip_address: yes
|
||||||
register: deploy
|
register: deploy
|
||||||
'''
|
'''
|
||||||
|
@ -240,7 +256,7 @@ class PyVmomiHelper(object):
|
||||||
return (self.folders, self.folder_map)
|
return (self.folders, self.folder_map)
|
||||||
|
|
||||||
|
|
||||||
def getvm(self, name=None, uuid=None, folder=None, firstmatch=False):
|
def getvm(self, name=None, uuid=None, folder=None, name_match=None):
|
||||||
|
|
||||||
# https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html
|
# https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html
|
||||||
# self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder')
|
# self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder')
|
||||||
|
@ -276,32 +292,35 @@ class PyVmomiHelper(object):
|
||||||
if not type(cObj) == vim.VirtualMachine:
|
if not type(cObj) == vim.VirtualMachine:
|
||||||
continue
|
continue
|
||||||
if cObj.name == name:
|
if cObj.name == name:
|
||||||
#vm = cObj
|
|
||||||
#break
|
|
||||||
matches.append(cObj)
|
matches.append(cObj)
|
||||||
if len(matches) > 1 and not firstmatch:
|
if len(matches) > 1 and not name_match:
|
||||||
assert len(matches) <= 1, "more than 1 vm exists by the name %s in folder %s. Please specify a uuid, a datacenter or firstmatch=true" % name
|
module.fail_json(msg='more than 1 vm exists by the name %s in folder %s. Please specify a uuid, a datacenter or name_match' \
|
||||||
|
% (folder, name))
|
||||||
elif len(matches) > 0:
|
elif len(matches) > 0:
|
||||||
vm = matches[0]
|
vm = matches[0]
|
||||||
#else:
|
|
||||||
#import epdb; epdb.st()
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
if firstmatch:
|
vmList = get_all_objs(self.content, [vim.VirtualMachine])
|
||||||
vm = get_obj(self.content, [vim.VirtualMachine], name)
|
if name_match:
|
||||||
|
if name_match == 'first':
|
||||||
|
vm = get_obj(self.content, [vim.VirtualMachine], name)
|
||||||
|
elif name_match == 'last':
|
||||||
|
matches = []
|
||||||
|
vmList = get_all_objs(self.content, [vim.VirtualMachine])
|
||||||
|
for thisvm in vmList:
|
||||||
|
if thisvm.config.name == name:
|
||||||
|
matches.append(thisvm)
|
||||||
|
if matches:
|
||||||
|
vm = matches[-1]
|
||||||
else:
|
else:
|
||||||
matches = []
|
matches = []
|
||||||
vmList = get_all_objs(self.content, [vim.VirtualMachine])
|
vmList = get_all_objs(self.content, [vim.VirtualMachine])
|
||||||
for thisvm in vmList:
|
for thisvm in vmList:
|
||||||
if thisvm.config == None:
|
|
||||||
import epdb; epdb.st()
|
|
||||||
if thisvm.config.name == name:
|
if thisvm.config.name == name:
|
||||||
matches.append(thisvm)
|
matches.append(thisvm)
|
||||||
# FIXME - fail this properly
|
if len(matches) > 1:
|
||||||
#import epdb; epdb.st()
|
module.fail_json(msg='more than 1 vm exists by the name %s. Please specify a uuid, or a folder, or a datacenter or name_match' % name)
|
||||||
assert len(matches) <= 1, "more than 1 vm exists by the name %s. Please specify a folder, a uuid, or firstmatch=true" % name
|
if matches:
|
||||||
if matches:
|
vm = matches[0]
|
||||||
vm = matches[0]
|
|
||||||
|
|
||||||
return vm
|
return vm
|
||||||
|
|
||||||
|
@ -440,16 +459,20 @@ class PyVmomiHelper(object):
|
||||||
|
|
||||||
datacenters = get_all_objs(self.content, [vim.Datacenter])
|
datacenters = get_all_objs(self.content, [vim.Datacenter])
|
||||||
datacenter = get_obj(self.content, [vim.Datacenter],
|
datacenter = get_obj(self.content, [vim.Datacenter],
|
||||||
self.params['datacenter_name'])
|
self.params['datacenter'])
|
||||||
|
|
||||||
# folder is a required clone argument
|
# folder is a required clone argument
|
||||||
if len(datacenters) > 1:
|
if len(datacenters) > 1:
|
||||||
# FIXME: need to find the folder in the right DC.
|
# FIXME: need to find the folder in the right DC.
|
||||||
raise "multi-dc with folders is not yet implemented"
|
raise "multi-dc with folders is not yet implemented"
|
||||||
else:
|
else:
|
||||||
destfolder = get_obj(self.content, [vim.Folder], self.params['vm_folder'])
|
destfolder = get_obj(
|
||||||
|
self.content,
|
||||||
|
[vim.Folder],
|
||||||
|
self.params['folder']
|
||||||
|
)
|
||||||
|
|
||||||
datastore_name = self.params['vm_disk'][0]['datastore']
|
datastore_name = self.params['disk'][0]['datastore']
|
||||||
datastore = get_obj(self.content, [vim.Datastore], datastore_name)
|
datastore = get_obj(self.content, [vim.Datastore], datastore_name)
|
||||||
|
|
||||||
|
|
||||||
|
@ -469,8 +492,8 @@ class PyVmomiHelper(object):
|
||||||
clonespec = vim.vm.CloneSpec()
|
clonespec = vim.vm.CloneSpec()
|
||||||
clonespec.location = relospec
|
clonespec.location = relospec
|
||||||
|
|
||||||
template = get_obj(self.content, [vim.VirtualMachine], self.params['template_src'])
|
template = get_obj(self.content, [vim.VirtualMachine], self.params['template'])
|
||||||
task = template.Clone(folder=destfolder, name=self.params['guest'], spec=clonespec)
|
task = template.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
|
||||||
self.wait_for_task(task)
|
self.wait_for_task(task)
|
||||||
|
|
||||||
if task.info.state == 'error':
|
if task.info.state == 'error':
|
||||||
|
@ -762,56 +785,60 @@ def main():
|
||||||
state=dict(
|
state=dict(
|
||||||
required=False,
|
required=False,
|
||||||
choices=[
|
choices=[
|
||||||
'powered_on',
|
'poweredon',
|
||||||
'powered_off',
|
'poweredoff',
|
||||||
'present',
|
'present',
|
||||||
'absent',
|
'absent',
|
||||||
'restarted',
|
'restarted',
|
||||||
'reconfigured'
|
'reconfigured'
|
||||||
],
|
],
|
||||||
default='present'),
|
default='present'),
|
||||||
template_src=dict(required=False, type='str'),
|
|
||||||
guest=dict(required=True, type='str'),
|
|
||||||
vm_folder=dict(required=False, type='str', default=None),
|
|
||||||
vm_disk=dict(required=False, type='list', default=[]),
|
|
||||||
vm_nic=dict(required=False, type='list', default=[]),
|
|
||||||
vm_hardware=dict(required=False, type='dict', default={}),
|
|
||||||
vm_hw_version=dict(required=False, default=None, type='str'),
|
|
||||||
force=dict(required=False, type='bool', default=False),
|
|
||||||
firstmatch=dict(required=False, type='bool', default=False),
|
|
||||||
datacenter_name=dict(required=False, type='str', default=None),
|
|
||||||
esxi_hostname=dict(required=False, type='str', default=None),
|
|
||||||
validate_certs=dict(required=False, type='bool', default=True),
|
validate_certs=dict(required=False, type='bool', default=True),
|
||||||
power_on_after_clone=dict(required=False, type='bool', default=True),
|
template_src=dict(required=False, type='str', aliases=['template']),
|
||||||
|
name=dict(required=True, type='str'),
|
||||||
|
name_match=dict(required=False, type='str', default='first'),
|
||||||
|
uuid=dict(required=False, type='str'),
|
||||||
|
folder=dict(required=False, type='str', default=None, aliases=['folder']),
|
||||||
|
disk=dict(required=False, type='list', default=[]),
|
||||||
|
nic=dict(required=False, type='list', default=[]),
|
||||||
|
hardware=dict(required=False, type='dict', default={}),
|
||||||
|
force=dict(required=False, type='bool', default=False),
|
||||||
|
datacenter=dict(required=False, type='str', default=None),
|
||||||
|
esxi_hostname=dict(required=False, type='str', default=None),
|
||||||
wait_for_ip_address=dict(required=False, type='bool', default=True)
|
wait_for_ip_address=dict(required=False, type='bool', default=True)
|
||||||
),
|
),
|
||||||
supports_check_mode=True,
|
supports_check_mode=True,
|
||||||
mutually_exclusive=[],
|
mutually_exclusive=[],
|
||||||
required_together=[
|
required_together=[
|
||||||
['state', 'force'],
|
['state', 'force'],
|
||||||
[
|
['template'],
|
||||||
'vm_disk',
|
|
||||||
'vm_nic',
|
|
||||||
'vm_hardware',
|
|
||||||
'esxi_hostname'
|
|
||||||
],
|
|
||||||
['template_src'],
|
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
pyv = PyVmomiHelper(module)
|
pyv = PyVmomiHelper(module)
|
||||||
|
|
||||||
# Check if the VM exists before continuing
|
# Check if the VM exists before continuing
|
||||||
vm = pyv.getvm(name=module.params['guest'],
|
vm = pyv.getvm(name=module.params['name'],
|
||||||
folder=module.params['vm_folder'],
|
folder=module.params['folder'],
|
||||||
firstmatch=module.params['firstmatch'])
|
uuid=module.params['uuid'],
|
||||||
|
name_match=module.params['name_match'])
|
||||||
|
|
||||||
# VM already exists
|
# VM already exists
|
||||||
if vm:
|
if vm:
|
||||||
# Run for facts only
|
|
||||||
if module.params['vmware_guest_facts']:
|
if module.params['state'] == 'absent':
|
||||||
|
# destroy it
|
||||||
|
if module.params['force']:
|
||||||
|
# has to be poweredoff first
|
||||||
|
result = pyv.set_powerstate(vm, 'poweredoff', module.params['force'])
|
||||||
|
result = pyv.remove_vm(vm)
|
||||||
|
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted']:
|
||||||
|
# set powerstate
|
||||||
|
result = pyv.set_powerstate(vm, module.params['state'], module.params['force'])
|
||||||
|
else:
|
||||||
|
# Run for facts only
|
||||||
try:
|
try:
|
||||||
module.exit_json(ansible_facts=pyv.gather_facts(vm))
|
module.exit_json(instance=pyv.gather_facts(vm))
|
||||||
except Exception:
|
except Exception:
|
||||||
e = get_exception()
|
e = get_exception()
|
||||||
module.fail_json(
|
module.fail_json(
|
||||||
|
@ -819,11 +846,22 @@ def main():
|
||||||
|
|
||||||
# VM doesn't exist
|
# VM doesn't exist
|
||||||
else:
|
else:
|
||||||
|
create_states = ['poweredon', 'poweredoff', 'present', 'restarted']
|
||||||
|
if module.params['state'] in create_states:
|
||||||
|
poweron = (module.params['state'] != 'poweredoff')
|
||||||
|
# Create it ...
|
||||||
|
result = pyv.deploy_template(
|
||||||
|
poweron=poweron,
|
||||||
|
wait_for_ip=module.params['wait_for_ip_address']
|
||||||
|
)
|
||||||
|
elif module.params['state'] == 'absent':
|
||||||
|
result = {'changed': False, 'failed': False}
|
||||||
|
else:
|
||||||
|
result = {'changed': False, 'failed': False}
|
||||||
|
|
||||||
# Create it ...
|
# FIXME
|
||||||
result = pyv.deploy_template(poweron=module.params['power_on_after_clone'],
|
if not 'failed' in result:
|
||||||
wait_for_ip=module.params['wait_for_ip_address'])
|
result['failed'] = False
|
||||||
|
|
||||||
|
|
||||||
if result['failed']:
|
if result['failed']:
|
||||||
module.fail_json(**result)
|
module.fail_json(**result)
|
|
@ -1,824 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
module: vmware_guest_state
|
|
||||||
short_description: manage the state of a vmware virtualmachine in vcenter
|
|
||||||
description:
|
|
||||||
- Uses pyvmomi to poweron/poweroff/delete/restart a virtualmachine
|
|
||||||
version_added: 2.2
|
|
||||||
author: James Tanner (@jctanner) <tanner.jc@gmail.com>
|
|
||||||
notes:
|
|
||||||
- Tested on vSphere 6.0
|
|
||||||
requirements:
|
|
||||||
- "python >= 2.6"
|
|
||||||
- PyVmomi
|
|
||||||
options:
|
|
||||||
guest:
|
|
||||||
description:
|
|
||||||
- Name of the newly deployed guest
|
|
||||||
required: True
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- What state should the machine be in?
|
|
||||||
- restarted/absent/poweredon/poweredoff
|
|
||||||
required: True
|
|
||||||
vm_uuid:
|
|
||||||
description:
|
|
||||||
- UUID of the instance to manage if known
|
|
||||||
required: False
|
|
||||||
vm_folder:
|
|
||||||
description:
|
|
||||||
- Folder path for the guest if known
|
|
||||||
required: False
|
|
||||||
firstmatch:
|
|
||||||
description:
|
|
||||||
- If multiple vms match, use the first found
|
|
||||||
required: False
|
|
||||||
force:
|
|
||||||
description:
|
|
||||||
- Ignore warnings and complete the actions
|
|
||||||
required: False
|
|
||||||
datacenter_name:
|
|
||||||
description:
|
|
||||||
- Destination datacenter for the deploy operation
|
|
||||||
required: True
|
|
||||||
extends_documentation_fragment: vmware.documentation
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
Examples from an ansible playbook ...
|
|
||||||
- name: poweroff the VM
|
|
||||||
vmware_guest_state:
|
|
||||||
validate_certs: False
|
|
||||||
hostname: 192.168.1.209
|
|
||||||
username: administrator@vsphere.local
|
|
||||||
password: vmware
|
|
||||||
guest: testvm_2
|
|
||||||
vm_folder: testvms
|
|
||||||
state: powered_off
|
|
||||||
ignore_errors: True
|
|
||||||
|
|
||||||
- name: remove the VM
|
|
||||||
vmware_guest_state:
|
|
||||||
validate_certs: False
|
|
||||||
hostname: 192.168.1.209
|
|
||||||
username: administrator@vsphere.local
|
|
||||||
password: vmware
|
|
||||||
guest: testvm_2
|
|
||||||
vm_folder: testvms
|
|
||||||
state: absent
|
|
||||||
ignore_errors: True
|
|
||||||
'''
|
|
||||||
|
|
||||||
RETURN = '''
|
|
||||||
state=absent
|
|
||||||
'''
|
|
||||||
|
|
||||||
try:
|
|
||||||
import json
|
|
||||||
except ImportError:
|
|
||||||
import simplejson as json
|
|
||||||
|
|
||||||
HAS_PYVMOMI = False
|
|
||||||
try:
|
|
||||||
import pyVmomi
|
|
||||||
from pyVmomi import vim
|
|
||||||
from pyVim.connect import SmartConnect, Disconnect
|
|
||||||
HAS_PYVMOMI = True
|
|
||||||
except ImportError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
import atexit
|
|
||||||
import os
|
|
||||||
import ssl
|
|
||||||
import time
|
|
||||||
|
|
||||||
from ansible.module_utils.urls import fetch_url
|
|
||||||
|
|
||||||
|
|
||||||
class PyVmomiHelper(object):
|
|
||||||
|
|
||||||
def __init__(self, module):
|
|
||||||
|
|
||||||
if not HAS_PYVMOMI:
|
|
||||||
module.fail_json(msg='pyvmomi module required')
|
|
||||||
|
|
||||||
self.module = module
|
|
||||||
self.params = module.params
|
|
||||||
self.si = None
|
|
||||||
self.smartconnect()
|
|
||||||
self.datacenter = None
|
|
||||||
|
|
||||||
def smartconnect(self):
|
|
||||||
kwargs = {'host': self.params['hostname'],
|
|
||||||
'user': self.params['username'],
|
|
||||||
'pwd': self.params['password']}
|
|
||||||
|
|
||||||
if hasattr(ssl, 'SSLContext'):
|
|
||||||
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
|
|
||||||
context.verify_mode = ssl.CERT_NONE
|
|
||||||
kwargs['sslContext'] = context
|
|
||||||
|
|
||||||
# CONNECT TO THE SERVER
|
|
||||||
try:
|
|
||||||
self.si = SmartConnect(**kwargs)
|
|
||||||
except Exception:
|
|
||||||
err = get_exception()
|
|
||||||
self.module.fail_json(msg="Cannot connect to %s: %s" %
|
|
||||||
(kwargs['host'], err))
|
|
||||||
atexit.register(Disconnect, self.si)
|
|
||||||
self.content = self.si.RetrieveContent()
|
|
||||||
|
|
||||||
def _build_folder_tree(self, folder, tree={}, treepath=None):
|
|
||||||
|
|
||||||
tree = {'virtualmachines': [],
|
|
||||||
'subfolders': {},
|
|
||||||
'name': folder.name}
|
|
||||||
|
|
||||||
children = None
|
|
||||||
if hasattr(folder, 'childEntity'):
|
|
||||||
children = folder.childEntity
|
|
||||||
|
|
||||||
if children:
|
|
||||||
for child in children:
|
|
||||||
if child == folder or child in tree:
|
|
||||||
continue
|
|
||||||
if type(child) == vim.Folder:
|
|
||||||
#ctree = self._build_folder_tree(child, tree={})
|
|
||||||
ctree = self._build_folder_tree(child)
|
|
||||||
tree['subfolders'][child] = dict.copy(ctree)
|
|
||||||
elif type(child) == vim.VirtualMachine:
|
|
||||||
tree['virtualmachines'].append(child)
|
|
||||||
else:
|
|
||||||
if type(folder) == vim.VirtualMachine:
|
|
||||||
return folder
|
|
||||||
return tree
|
|
||||||
|
|
||||||
|
|
||||||
def _build_folder_map(self, folder, vmap={}, inpath='/'):
|
|
||||||
|
|
||||||
''' Build a searchable index for vms+uuids+folders '''
|
|
||||||
|
|
||||||
if type(folder) == tuple:
|
|
||||||
folder = folder[1]
|
|
||||||
|
|
||||||
if not 'names' in vmap:
|
|
||||||
vmap['names'] = {}
|
|
||||||
if not 'uuids' in vmap:
|
|
||||||
vmap['uuids'] = {}
|
|
||||||
if not 'paths' in vmap:
|
|
||||||
vmap['paths'] = {}
|
|
||||||
|
|
||||||
if inpath == '/':
|
|
||||||
thispath = '/vm'
|
|
||||||
else:
|
|
||||||
thispath = os.path.join(inpath, folder['name'])
|
|
||||||
|
|
||||||
for item in folder.items():
|
|
||||||
k = item[0]
|
|
||||||
v = item[1]
|
|
||||||
if k == 'name':
|
|
||||||
pass
|
|
||||||
elif k == 'subfolders':
|
|
||||||
for x in v.items():
|
|
||||||
vmap = self._build_folder_map(x, vmap=vmap, inpath=thispath)
|
|
||||||
elif k == 'virtualmachines':
|
|
||||||
for x in v:
|
|
||||||
if not x.config.name in vmap['names']:
|
|
||||||
vmap['names'][x.config.name] = []
|
|
||||||
vmap['names'][x.config.name].append(x.config.uuid)
|
|
||||||
vmap['uuids'][x.config.uuid] = x.config.name
|
|
||||||
if not thispath in vmap['paths']:
|
|
||||||
vmap['paths'][thispath] = []
|
|
||||||
vmap['paths'][thispath].append(x.config.uuid)
|
|
||||||
|
|
||||||
return vmap
|
|
||||||
|
|
||||||
def getfolders(self):
|
|
||||||
|
|
||||||
if not self.datacenter:
|
|
||||||
self.datacenter = get_obj(self.content, [vim.Datacenter],
|
|
||||||
self.params['esxi']['datacenter'])
|
|
||||||
self.folders = self._build_folder_tree(self.datacenter.vmFolder)
|
|
||||||
self.folder_map = self._build_folder_map(self.folders)
|
|
||||||
return (self.folders, self.folder_map)
|
|
||||||
|
|
||||||
|
|
||||||
def getvm(self, name=None, uuid=None, folder=None, firstmatch=False):
|
|
||||||
|
|
||||||
# https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html
|
|
||||||
# self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder')
|
|
||||||
|
|
||||||
vm = None
|
|
||||||
folder_path = None
|
|
||||||
|
|
||||||
if uuid:
|
|
||||||
vm = self.si.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True)
|
|
||||||
|
|
||||||
elif folder:
|
|
||||||
|
|
||||||
matches = []
|
|
||||||
folder_paths = []
|
|
||||||
|
|
||||||
datacenter = None
|
|
||||||
if 'esxi' in self.params:
|
|
||||||
if 'datacenter' in self.params['esxi']:
|
|
||||||
datacenter = self.params['esxi']['datacenter']
|
|
||||||
|
|
||||||
if datacenter:
|
|
||||||
folder_paths.append('%s/vm/%s' % (datacenter, folder))
|
|
||||||
else:
|
|
||||||
# get a list of datacenters
|
|
||||||
datacenters = get_all_objs(self.content, [vim.Datacenter])
|
|
||||||
datacenters = [x.name for x in datacenters]
|
|
||||||
for dc in datacenters:
|
|
||||||
folder_paths.append('%s/vm/%s' % (dc, folder))
|
|
||||||
|
|
||||||
for folder_path in folder_paths:
|
|
||||||
fObj = self.si.content.searchIndex.FindByInventoryPath(folder_path)
|
|
||||||
for cObj in fObj.childEntity:
|
|
||||||
if not type(cObj) == vim.VirtualMachine:
|
|
||||||
continue
|
|
||||||
if cObj.name == name:
|
|
||||||
#vm = cObj
|
|
||||||
#break
|
|
||||||
matches.append(cObj)
|
|
||||||
if len(matches) > 1 and not firstmatch:
|
|
||||||
assert len(matches) <= 1, "more than 1 vm exists by the name %s in folder %s. Please specify a uuid, a datacenter or firstmatch=true" % name
|
|
||||||
elif len(matches) > 0:
|
|
||||||
vm = matches[0]
|
|
||||||
#else:
|
|
||||||
#import epdb; epdb.st()
|
|
||||||
|
|
||||||
else:
|
|
||||||
if firstmatch:
|
|
||||||
vm = get_obj(self.content, [vim.VirtualMachine], name)
|
|
||||||
else:
|
|
||||||
matches = []
|
|
||||||
vmList = get_all_objs(self.content, [vim.VirtualMachine])
|
|
||||||
for thisvm in vmList:
|
|
||||||
if thisvm.config == None:
|
|
||||||
import epdb; epdb.st()
|
|
||||||
if thisvm.config.name == name:
|
|
||||||
matches.append(thisvm)
|
|
||||||
# FIXME - fail this properly
|
|
||||||
#import epdb; epdb.st()
|
|
||||||
assert len(matches) <= 1, "more than 1 vm exists by the name %s. Please specify a folder, a uuid, or firstmatch=true" % name
|
|
||||||
if matches:
|
|
||||||
vm = matches[0]
|
|
||||||
|
|
||||||
return vm
|
|
||||||
|
|
||||||
|
|
||||||
def set_powerstate(self, vm, state, force):
|
|
||||||
"""
|
|
||||||
Set the power status for a VM determined by the current and
|
|
||||||
requested states. force is forceful
|
|
||||||
"""
|
|
||||||
facts = self.gather_facts(vm)
|
|
||||||
expected_state = state.replace('_', '').lower()
|
|
||||||
current_state = facts['hw_power_status'].lower()
|
|
||||||
result = {}
|
|
||||||
|
|
||||||
# Need Force
|
|
||||||
if not force and current_state not in ['poweredon', 'poweredoff']:
|
|
||||||
return "VM is in %s power state. Force is required!" % current_state
|
|
||||||
|
|
||||||
# State is already true
|
|
||||||
if current_state == expected_state:
|
|
||||||
result['changed'] = False
|
|
||||||
result['failed'] = False
|
|
||||||
else:
|
|
||||||
task = None
|
|
||||||
try:
|
|
||||||
if expected_state == 'poweredoff':
|
|
||||||
task = vm.PowerOff()
|
|
||||||
|
|
||||||
elif expected_state == 'poweredon':
|
|
||||||
task = vm.PowerOn()
|
|
||||||
|
|
||||||
elif expected_state == 'restarted':
|
|
||||||
if current_state in ('poweredon', 'poweringon', 'resetting'):
|
|
||||||
task = vm.Reset()
|
|
||||||
else:
|
|
||||||
result = {'changed': False, 'failed': True,
|
|
||||||
'msg': "Cannot restart VM in the current state %s" % current_state}
|
|
||||||
|
|
||||||
except Exception:
|
|
||||||
result = {'changed': False, 'failed': True,
|
|
||||||
'msg': get_exception()}
|
|
||||||
|
|
||||||
if task:
|
|
||||||
self.wait_for_task(task)
|
|
||||||
if task.info.state == 'error':
|
|
||||||
result = {'changed': False, 'failed': True, 'msg': task.info.error.msg}
|
|
||||||
else:
|
|
||||||
result = {'changed': True, 'failed': False}
|
|
||||||
|
|
||||||
# need to get new metadata if changed
|
|
||||||
if result['changed']:
|
|
||||||
newvm = self.getvm(uuid=vm.config.uuid)
|
|
||||||
facts = self.gather_facts(newvm)
|
|
||||||
result['instance'] = facts
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def gather_facts(self, vm):
|
|
||||||
|
|
||||||
''' Gather facts from vim.VirtualMachine object. '''
|
|
||||||
|
|
||||||
facts = {
|
|
||||||
'module_hw': True,
|
|
||||||
'hw_name': vm.config.name,
|
|
||||||
'hw_power_status': vm.summary.runtime.powerState,
|
|
||||||
'hw_guest_full_name': vm.summary.guest.guestFullName,
|
|
||||||
'hw_guest_id': vm.summary.guest.guestId,
|
|
||||||
'hw_product_uuid': vm.config.uuid,
|
|
||||||
'hw_processor_count': vm.config.hardware.numCPU,
|
|
||||||
'hw_memtotal_mb': vm.config.hardware.memoryMB,
|
|
||||||
'hw_interfaces':[],
|
|
||||||
'ipv4': None,
|
|
||||||
'ipv6': None,
|
|
||||||
}
|
|
||||||
|
|
||||||
netDict = {}
|
|
||||||
for device in vm.guest.net:
|
|
||||||
mac = device.macAddress
|
|
||||||
ips = list(device.ipAddress)
|
|
||||||
netDict[mac] = ips
|
|
||||||
for k,v in netDict.iteritems():
|
|
||||||
for ipaddress in v:
|
|
||||||
if ipaddress:
|
|
||||||
if '::' in ipaddress:
|
|
||||||
facts['ipv6'] = ipaddress
|
|
||||||
else:
|
|
||||||
facts['ipv4'] = ipaddress
|
|
||||||
|
|
||||||
for idx,entry in enumerate(vm.config.hardware.device):
|
|
||||||
if not hasattr(entry, 'macAddress'):
|
|
||||||
continue
|
|
||||||
|
|
||||||
factname = 'hw_eth' + str(idx)
|
|
||||||
facts[factname] = {
|
|
||||||
'addresstype': entry.addressType,
|
|
||||||
'label': entry.deviceInfo.label,
|
|
||||||
'macaddress': entry.macAddress,
|
|
||||||
'ipaddresses': netDict.get(entry.macAddress, None),
|
|
||||||
'macaddress_dash': entry.macAddress.replace(':', '-'),
|
|
||||||
'summary': entry.deviceInfo.summary,
|
|
||||||
}
|
|
||||||
facts['hw_interfaces'].append('eth'+str(idx))
|
|
||||||
|
|
||||||
return facts
|
|
||||||
|
|
||||||
|
|
||||||
def remove_vm(self, vm):
|
|
||||||
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
|
|
||||||
task = vm.Destroy()
|
|
||||||
self.wait_for_task(task)
|
|
||||||
|
|
||||||
if task.info.state == 'error':
|
|
||||||
return ({'changed': False, 'failed': True, 'msg': task.info.error.msg})
|
|
||||||
else:
|
|
||||||
return ({'changed': True, 'failed': False})
|
|
||||||
|
|
||||||
|
|
||||||
def deploy_template(self, poweron=False, wait_for_ip=False):
|
|
||||||
|
|
||||||
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
|
|
||||||
|
|
||||||
'''
|
|
||||||
deploy_template(
|
|
||||||
vsphere_client=viserver,
|
|
||||||
esxi=esxi,
|
|
||||||
resource_pool=resource_pool,
|
|
||||||
guest=guest,
|
|
||||||
template_src=template_src,
|
|
||||||
module=module,
|
|
||||||
cluster_name=cluster,
|
|
||||||
snapshot_to_clone=snapshot_to_clone,
|
|
||||||
power_on_after_clone=power_on_after_clone,
|
|
||||||
vm_extra_config=vm_extra_config
|
|
||||||
)
|
|
||||||
'''
|
|
||||||
|
|
||||||
# FIXME:
|
|
||||||
# - clusters
|
|
||||||
# - resource pools
|
|
||||||
# - multiple templates by the same name
|
|
||||||
# - static IPs
|
|
||||||
|
|
||||||
datacenters = get_all_objs(self.content, [vim.Datacenter])
|
|
||||||
datacenter = get_obj(self.content, [vim.Datacenter],
|
|
||||||
self.params['esxi']['datacenter'])
|
|
||||||
|
|
||||||
# folder is a required clone argument
|
|
||||||
if len(datacenters) > 1:
|
|
||||||
# FIXME: need to find the folder in the right DC.
|
|
||||||
raise "multi-dc with folders is not yet implemented"
|
|
||||||
else:
|
|
||||||
destfolder = get_obj(self.content, [vim.Folder], self.params['vm_folder'])
|
|
||||||
|
|
||||||
datastore_name = self.params['vm_disk']['disk1']['datastore']
|
|
||||||
datastore = get_obj(self.content, [vim.Datastore], datastore_name)
|
|
||||||
|
|
||||||
|
|
||||||
# cluster or hostsystem ... ?
|
|
||||||
#cluster = get_obj(self.content, [vim.ClusterComputeResource], self.params['esxi']['hostname'])
|
|
||||||
hostsystem = get_obj(self.content, [vim.HostSystem], self.params['esxi']['hostname'])
|
|
||||||
#import epdb; epdb.st()
|
|
||||||
|
|
||||||
resource_pools = get_all_objs(self.content, [vim.ResourcePool])
|
|
||||||
#import epdb; epdb.st()
|
|
||||||
|
|
||||||
relospec = vim.vm.RelocateSpec()
|
|
||||||
relospec.datastore = datastore
|
|
||||||
|
|
||||||
# fixme ... use the pool from the cluster if given
|
|
||||||
relospec.pool = resource_pools[0]
|
|
||||||
relospec.host = hostsystem
|
|
||||||
#import epdb; epdb.st()
|
|
||||||
|
|
||||||
clonespec = vim.vm.CloneSpec()
|
|
||||||
clonespec.location = relospec
|
|
||||||
#clonespec.powerOn = power_on
|
|
||||||
|
|
||||||
template = get_obj(self.content, [vim.VirtualMachine], self.params['template_src'])
|
|
||||||
task = template.Clone(folder=destfolder, name=self.params['guest'], spec=clonespec)
|
|
||||||
self.wait_for_task(task)
|
|
||||||
|
|
||||||
if task.info.state == 'error':
|
|
||||||
return ({'changed': False, 'failed': True, 'msg': task.info.error.msg})
|
|
||||||
else:
|
|
||||||
|
|
||||||
vm = task.info.result
|
|
||||||
if wait_for_ip:
|
|
||||||
self.set_powerstate(vm, 'poweredon', force=False)
|
|
||||||
self.wait_for_vm_ip(vm)
|
|
||||||
vm_facts = self.gather_facts(vm)
|
|
||||||
return ({'changed': True, 'failed': False, 'instance': vm_facts})
|
|
||||||
|
|
||||||
|
|
||||||
def wait_for_task(self, task):
|
|
||||||
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
|
|
||||||
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
|
|
||||||
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
|
|
||||||
while task.info.state not in ['success', 'error']:
|
|
||||||
time.sleep(1)
|
|
||||||
|
|
||||||
def wait_for_vm_ip(self, vm, poll=100, sleep=5):
|
|
||||||
ips = None
|
|
||||||
facts = {}
|
|
||||||
thispoll = 0
|
|
||||||
while not ips and thispoll <= poll:
|
|
||||||
newvm = self.getvm(uuid=vm.config.uuid)
|
|
||||||
facts = self.gather_facts(newvm)
|
|
||||||
if facts['ipv4'] or facts['ipv6']:
|
|
||||||
ips = True
|
|
||||||
else:
|
|
||||||
time.sleep(sleep)
|
|
||||||
thispoll += 1
|
|
||||||
|
|
||||||
#import epdb; epdb.st()
|
|
||||||
return facts
|
|
||||||
|
|
||||||
|
|
||||||
def fetch_file_from_guest(self, vm, username, password, src, dest):
|
|
||||||
|
|
||||||
''' Use VMWare's filemanager api to fetch a file over http '''
|
|
||||||
|
|
||||||
result = {'failed': False}
|
|
||||||
|
|
||||||
tools_status = vm.guest.toolsStatus
|
|
||||||
if (tools_status == 'toolsNotInstalled' or
|
|
||||||
tools_status == 'toolsNotRunning'):
|
|
||||||
result['failed'] = True
|
|
||||||
result['msg'] = "VMwareTools is not installed or is not running in the guest"
|
|
||||||
return result
|
|
||||||
|
|
||||||
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
|
|
||||||
creds = vim.vm.guest.NamePasswordAuthentication(
|
|
||||||
username=username, password=password
|
|
||||||
)
|
|
||||||
|
|
||||||
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
|
|
||||||
fti = self.content.guestOperationsManager.fileManager. \
|
|
||||||
InitiateFileTransferFromGuest(vm, creds, src)
|
|
||||||
|
|
||||||
result['size'] = fti.size
|
|
||||||
result['url'] = fti.url
|
|
||||||
|
|
||||||
# Use module_utils to fetch the remote url returned from the api
|
|
||||||
rsp, info = fetch_url(self.module, fti.url, use_proxy=False,
|
|
||||||
force=True, last_mod_time=None,
|
|
||||||
timeout=10, headers=None)
|
|
||||||
|
|
||||||
# save all of the transfer data
|
|
||||||
for k,v in info.iteritems():
|
|
||||||
result[k] = v
|
|
||||||
|
|
||||||
# exit early if xfer failed
|
|
||||||
if info['status'] != 200:
|
|
||||||
result['failed'] = True
|
|
||||||
return result
|
|
||||||
|
|
||||||
# attempt to read the content and write it
|
|
||||||
try:
|
|
||||||
with open(dest, 'wb') as f:
|
|
||||||
f.write(rsp.read())
|
|
||||||
except Exception as e:
|
|
||||||
result['failed'] = True
|
|
||||||
result['msg'] = str(e)
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def push_file_to_guest(self, vm, username, password, src, dest, overwrite=True):
|
|
||||||
|
|
||||||
''' Use VMWare's filemanager api to push a file over http '''
|
|
||||||
|
|
||||||
result = {'failed': False}
|
|
||||||
|
|
||||||
tools_status = vm.guest.toolsStatus
|
|
||||||
if (tools_status == 'toolsNotInstalled' or
|
|
||||||
tools_status == 'toolsNotRunning'):
|
|
||||||
result['failed'] = True
|
|
||||||
result['msg'] = "VMwareTools is not installed or is not running in the guest"
|
|
||||||
return result
|
|
||||||
|
|
||||||
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
|
|
||||||
creds = vim.vm.guest.NamePasswordAuthentication(
|
|
||||||
username=username, password=password
|
|
||||||
)
|
|
||||||
|
|
||||||
# the api requires a filesize in bytes
|
|
||||||
filesize = None
|
|
||||||
fdata = None
|
|
||||||
try:
|
|
||||||
#filesize = os.path.getsize(src)
|
|
||||||
filesize = os.stat(src).st_size
|
|
||||||
fdata = None
|
|
||||||
with open(src, 'rb') as f:
|
|
||||||
fdata = f.read()
|
|
||||||
result['local_filesize'] = filesize
|
|
||||||
except Exception as e:
|
|
||||||
result['failed'] = True
|
|
||||||
result['msg'] = "Unable to read src file: %s" % str(e)
|
|
||||||
return result
|
|
||||||
|
|
||||||
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
|
|
||||||
file_attribute = vim.vm.guest.FileManager.FileAttributes()
|
|
||||||
url = self.content.guestOperationsManager.fileManager. \
|
|
||||||
InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
|
|
||||||
filesize, overwrite)
|
|
||||||
|
|
||||||
# PUT the filedata to the url ...
|
|
||||||
rsp, info = fetch_url(self.module, url, method="put", data=fdata,
|
|
||||||
use_proxy=False, force=True, last_mod_time=None,
|
|
||||||
timeout=10, headers=None)
|
|
||||||
|
|
||||||
result['msg'] = str(rsp.read())
|
|
||||||
|
|
||||||
# save all of the transfer data
|
|
||||||
for k,v in info.iteritems():
|
|
||||||
result[k] = v
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def run_command_in_guest(self, vm, username, password, program_path, program_args, program_cwd, program_env):
|
|
||||||
|
|
||||||
result = {'failed': False}
|
|
||||||
|
|
||||||
tools_status = vm.guest.toolsStatus
|
|
||||||
if (tools_status == 'toolsNotInstalled' or
|
|
||||||
tools_status == 'toolsNotRunning'):
|
|
||||||
result['failed'] = True
|
|
||||||
result['msg'] = "VMwareTools is not installed or is not running in the guest"
|
|
||||||
return result
|
|
||||||
|
|
||||||
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
|
|
||||||
creds = vim.vm.guest.NamePasswordAuthentication(
|
|
||||||
username=username, password=password
|
|
||||||
)
|
|
||||||
|
|
||||||
res = None
|
|
||||||
pdata = None
|
|
||||||
try:
|
|
||||||
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
|
|
||||||
pm = self.content.guestOperationsManager.processManager
|
|
||||||
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
|
|
||||||
ps = vim.vm.guest.ProcessManager.ProgramSpec(
|
|
||||||
#programPath=program,
|
|
||||||
#arguments=args
|
|
||||||
programPath=program_path,
|
|
||||||
arguments=program_args,
|
|
||||||
workingDirectory=program_cwd,
|
|
||||||
)
|
|
||||||
res = pm.StartProgramInGuest(vm, creds, ps)
|
|
||||||
result['pid'] = res
|
|
||||||
pdata = pm.ListProcessesInGuest(vm, creds, [res])
|
|
||||||
|
|
||||||
# wait for pid to finish
|
|
||||||
while not pdata[0].endTime:
|
|
||||||
time.sleep(1)
|
|
||||||
pdata = pm.ListProcessesInGuest(vm, creds, [res])
|
|
||||||
result['owner'] = pdata[0].owner
|
|
||||||
result['startTime'] = pdata[0].startTime.isoformat()
|
|
||||||
result['endTime'] = pdata[0].endTime.isoformat()
|
|
||||||
result['exitCode'] = pdata[0].exitCode
|
|
||||||
if result['exitCode'] != 0:
|
|
||||||
result['failed'] = True
|
|
||||||
result['msg'] = "program exited non-zero"
|
|
||||||
else:
|
|
||||||
result['msg'] = "program completed successfully"
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
result['msg'] = str(e)
|
|
||||||
result['failed'] = True
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def get_obj(content, vimtype, name):
    """
    Return the first managed object of the given type(s) whose name
    matches ``name``; if ``name`` is falsy (None/empty) the first object
    found is returned. Returns None when nothing matches.
    """
    obj = None
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    try:
        for c in container.view:
            # a falsy name means "any object of this type"
            if not name or c.name == name:
                obj = c
                break
    finally:
        # BUGFIX: always release the view, even if iteration raises,
        # so we do not leak server-side ContainerView sessions.
        container.Destroy()
    return obj
|
|
||||||
|
|
||||||
|
|
||||||
def get_all_objs(content, vimtype):
    """
    Return a list of every vsphere object of the requested type(s).
    """
    container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
    found = list(container.view)
    container.Destroy()
    return found
|
|
||||||
|
|
||||||
|
|
||||||
def _build_folder_tree(nodes, parent):
|
|
||||||
tree = {}
|
|
||||||
|
|
||||||
for node in nodes:
|
|
||||||
if node['parent'] == parent:
|
|
||||||
tree[node['name']] = dict.copy(node)
|
|
||||||
tree[node['name']]['subfolders'] = _build_folder_tree(nodes, node['id'])
|
|
||||||
del tree[node['name']]['parent']
|
|
||||||
|
|
||||||
return tree
|
|
||||||
|
|
||||||
|
|
||||||
def _find_path_in_tree(tree, path):
|
|
||||||
for name, o in tree.iteritems():
|
|
||||||
if name == path[0]:
|
|
||||||
if len(path) == 1:
|
|
||||||
return o
|
|
||||||
else:
|
|
||||||
return _find_path_in_tree(o['subfolders'], path[1:])
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def _get_folderid_for_path(vsphere_client, datacenter, path):
    # Resolve a '/'-separated folder path (relative to the datacenter's
    # 'vm' root folder) to its managed object id, or None if not found.
    # NOTE(review): relies on MORTypes, which is not imported in this
    # file's visible scope — presumably a leftover from the old pysphere
    # implementation; verify before use.
    content = vsphere_client._retrieve_properties_traversal(property_names=['name', 'parent'], obj_type=MORTypes.Folder)
    if not content:
        return {}

    node_list = []
    for o in content:
        has_parent = len(o.PropSet) > 1
        node_list.append({
            'id': o.Obj,
            'name': o.PropSet[0].Val,
            'parent': o.PropSet[1].Val if has_parent else None,
        })

    # Descend into the datacenter's implicit 'vm' folder first, then walk
    # the caller-supplied path below it.
    tree = _build_folder_tree(node_list, datacenter)
    tree = _find_path_in_tree(tree, ['vm'])['subfolders']
    folder = _find_path_in_tree(tree, path.split('/'))
    return folder['id'] if folder else None
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    '''Ansible entry point: dispatch on desired state for the named VM.'''

    module = AnsibleModule(
        argument_spec=dict(
            validate_certs=dict(required=False, type='bool', default=True),
            hostname=dict(
                type='str',
                default=os.environ.get('VMWARE_HOST')
            ),
            username=dict(
                type='str',
                default=os.environ.get('VMWARE_USER')
            ),
            password=dict(
                type='str', no_log=True,
                default=os.environ.get('VMWARE_PASSWORD')
            ),
            state=dict(
                required=True,
                choices=[
                    'powered_on',
                    'powered_off',
                    'present',
                    'absent',
                    'restarted',
                ],
            ),
            guest=dict(required=True, type='str'),
            vm_folder=dict(required=False, type='str', default=None),
            vm_uuid=dict(required=False, type='str', default=None),
            firstmatch=dict(required=False, type='bool', default=False),
            force=dict(required=False, type='bool', default=False),
            datacenter=dict(required=False, type='str', default=None),
        ),
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[],
    )

    pyv = PyVmomiHelper(module)

    # Check if the VM exists before continuing
    vm = pyv.getvm(name=module.params['guest'],
                   folder=module.params['vm_folder'],
                   uuid=module.params['vm_uuid'],
                   firstmatch=module.params['firstmatch'])

    if vm:
        # Power Changes
        if module.params['state'] in ['powered_on', 'powered_off', 'restarted']:
            result = pyv.set_powerstate(vm, module.params['state'], module.params['force'])

            # set_powerstate signals failure by returning a message string
            if isinstance(result, basestring):
                result = {'changed': False, 'failed': True, 'msg': result}

        # Just check if there
        elif module.params['state'] == 'present':
            result = {'changed': False}

        elif module.params['state'] == 'absent':
            result = pyv.remove_vm(vm)

    # VM doesn't exist
    else:

        if module.params['state'] == 'present':
            result = {'failed': True, 'msg': "vm does not exist"}

        elif module.params['state'] in ['restarted', 'reconfigured']:
            # BUGFIX: this message formatted an undefined local name
            # `guest`, raising NameError instead of reporting the error.
            result = {'changed': False, 'failed': True,
                      'msg': "No such VM %s. States [restarted, reconfigured] required an existing VM" % module.params['guest']}

        elif module.params['state'] == 'absent':
            result = {'changed': False, 'failed': False,
                      'msg': "vm %s not present" % module.params['guest']}

        elif module.params['state'] in ['powered_off', 'powered_on']:
            result = {'changed': False, 'failed': True,
                      'msg': "No such VM %s. States [powered_off, powered_on] required an existing VM" % module.params['guest']}

    # BUGFIX: some success results (e.g. state=present on an existing vm)
    # carry no 'failed' key; result['failed'] raised KeyError there.
    if result.get('failed'):
        module.fail_json(**result)
    else:
        module.exit_json(**result)
|
|
||||||
|
|
||||||
|
|
||||||
# this is magic, see lib/ansible/module_common.py
|
|
||||||
from ansible.module_utils.basic import *
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
|
|
Loading…
Reference in a new issue