Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)

VMware: Refactor vmware_cluster (#37072)

* Update cluster logic
* Refactor code
* Documentation update

Signed-off-by: Abhijeet Kasurde <akasurde@redhat.com>

parent b964c46235
commit 8cd395f595
2 changed files with 384 additions and 110 deletions
@@ -113,17 +113,6 @@ def find_dvspg_by_name(dv_switch, portgroup_name):
     return None


-# Maintain for legacy, or remove with 2.1 ?
-# Should be replaced with find_cluster_by_name
-def find_cluster_by_name_datacenter(datacenter, cluster_name):
-
-    host_folder = datacenter.hostFolder
-    for folder in host_folder.childEntity:
-        if folder.name == cluster_name:
-            return folder
-    return None
-
-
 def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
     if not isinstance(obj_type, list):
         obj_type = [obj_type]
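The legacy helper removed above only walked the immediate children of the datacenter's hostFolder, so clusters nested in sub-folders were never found; the module now relies on the shared find_cluster_by_name helper instead. A minimal sketch of how such a lookup can be done with a pyVmomi container view (the function name and wrapper are illustrative, not the actual module_utils implementation):

from pyVmomi import vim


def find_cluster_anywhere(content, cluster_name, folder=None):
    # Recursively search the whole inventory (or a given folder) instead of
    # only the datacenter's direct hostFolder children.
    folder = folder or content.rootFolder
    view = content.viewManager.CreateContainerView(folder, [vim.ClusterComputeResource], True)
    try:
        for cluster in view.view:
            if cluster.name == cluster_name:
                return cluster
    finally:
        view.DestroyView()
    return None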
@@ -2,60 +2,180 @@
 # -*- coding: utf-8 -*-

 # Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+#
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

 from __future__ import absolute_import, division, print_function
 __metaclass__ = type

-ANSIBLE_METADATA = {'metadata_version': '1.1',
-                    'status': ['preview'],
-                    'supported_by': 'community'}
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}

 DOCUMENTATION = r'''
 ---
 module: vmware_cluster
 short_description: Manage VMware vSphere clusters
 description:
-- Add or remove VMware vSphere clusters.
+- This module can be used to add, remove and update VMware vSphere clusters and its configurations.
+- Module can manage HA, DRS and VSAN related configurations.
+- All values and VMware object names are case sensitive.
 version_added: '2.0'
 author:
 - Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
 requirements:
-- Tested on ESXi 5.5
-- PyVmomi installed
+- Tested on ESXi 5.5 and 6.5.
+- PyVmomi installed.
 options:
   cluster_name:
     description:
-    - The name of the cluster that will be created.
+    - The name of the cluster to be managed.
     required: yes
-  datacenter_name:
+  datacenter:
     description:
-    - The name of the datacenter the cluster will be created in.
+    - The name of the datacenter.
     required: yes
+    aliases: [ datacenter_name ]
   enable_drs:
     description:
-    - If set to C(yes) will enable DRS when the cluster is created.
+    - If set to C(yes), will enable DRS when the cluster is created.
    type: bool
     default: 'no'
+  drs_enable_vm_behavior_overrides:
+    description:
+    - Determines whether DRS Behavior overrides for individual virtual machines are enabled.
+    - If set to C(True), overrides C(drs_default_vm_behavior).
+    type: bool
+    default: True
+    version_added: 2.8
+  drs_default_vm_behavior:
+    description:
+    - Specifies the cluster-wide default DRS behavior for virtual machines.
+    - If set to C(partiallyAutomated), then vCenter generate recommendations for virtual machine migration and
+      for the placement with a host. vCenter automatically implement placement at power on.
+    - If set to C(manual), then vCenter generate recommendations for virtual machine migration and
+      for the placement with a host. vCenter should not implement the recommendations automatically.
+    - If set to C(fullyAutomated), then vCenter should automate both the migration of virtual machines
+      and their placement with a host at power on.
+    default: fullyAutomated
+    choices: [ fullyAutomated, manual, partiallyAutomated ]
+    version_added: 2.8
+  drs_vmotion_rate:
+    description:
+    - Threshold for generated ClusterRecommendations.
+    default: 3
+    choices: [ 1, 2, 3, 4, 5 ]
+    version_added: 2.8
   enable_ha:
     description:
     - If set to C(yes) will enable HA when the cluster is created.
     type: bool
     default: 'no'
+  ha_host_monitoring:
+    description:
+    - Indicates whether HA restarts virtual machines after a host fails.
+    - If set to C(enabled), HA restarts virtual machines after a host fails.
+    - If set to C(disabled), HA does not restart virtual machines after a host fails.
+    - If C(enable_ha) is set to C(no), then this value is ignored.
+    choices: [ 'enabled', 'disabled' ]
+    default: 'enabled'
+    version_added: 2.8
+  ha_vm_monitoring:
+    description:
+    - Indicates the state of virtual machine health monitoring service.
+    - If set to C(vmAndAppMonitoring), HA response to both virtual machine and application heartbeat failure.
+    - If set to C(vmMonitoringDisabled), virtual machine health monitoring is disabled.
+    - If set to C(vmMonitoringOnly), HA response to virtual machine heartbeat failure.
+    - If C(enable_ha) is set to C(no), then this value is ignored.
+    choices: ['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled']
+    default: 'vmMonitoringDisabled'
+    version_added: 2.8
+  ha_failover_level:
+    description:
+    - Number of host failures that should be tolerated, still guaranteeing sufficient resources to
+      restart virtual machines on available hosts.
+    - Accepts integer values only.
+    default: 2
+    version_added: 2.8
+  ha_admission_control_enabled:
+    description:
+    - Determines if strict admission control is enabled.
+    - It is recommended to set this parameter to C(True), please refer documentation for more details.
+    default: True
+    type: bool
+    version_added: 2.8
+  ha_vm_failure_interval:
+    description:
+    - The number of seconds after which virtual machine is declared as failed
+      if no heartbeat has been received.
+    - This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+    - Unit is seconds.
+    default: 30
+    version_added: 2.8
+  ha_vm_min_up_time:
+    description:
+    - The number of seconds for the virtual machine's heartbeats to stabilize after
+      the virtual machine has been powered on.
+    - This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+    - Unit is seconds.
+    default: 120
+    version_added: 2.8
+  ha_vm_max_failures:
+    description:
+    - Maximum number of failures and automated resets allowed during the time
+      that C(ha_vm_max_failure_window) specifies.
+    - This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+    default: 3
+    version_added: 2.8
+  ha_vm_max_failure_window:
+    description:
+    - The number of seconds for the window during which up to C(ha_vm_max_failures) resets
+      can occur before automated responses stop.
+    - This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+    - Unit is seconds.
+    - Default specifies no failure window.
+    default: -1
+    version_added: 2.8
+  ha_restart_priority:
+    description:
+    - Determines the preference that HA gives to a virtual machine if sufficient capacity is not available
+      to power on all failed virtual machines.
+    - This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+    - If set to C(disabled), then HA is disabled for this virtual machine.
+    - If set to C(high), then virtual machine with this priority have a higher chance of powering on after a failure,
+      when there is insufficient capacity on hosts to meet all virtual machine needs.
+    - If set to C(medium), then virtual machine with this priority have an intermediate chance of powering on after a failure,
+      when there is insufficient capacity on hosts to meet all virtual machine needs.
+    - If set to C(low), then virtual machine with this priority have a lower chance of powering on after a failure,
+      when there is insufficient capacity on hosts to meet all virtual machine needs.
+    default: 'medium'
+    version_added: 2.8
+    choices: [ 'disabled', 'high', 'low', 'medium' ]
   enable_vsan:
     description:
     - If set to C(yes) will enable vSAN when the cluster is created.
     type: bool
     default: 'no'
+  vsan_auto_claim_storage:
+    description:
+    - Determines whether the VSAN service is configured to automatically claim local storage
+      on VSAN-enabled hosts in the cluster.
+    type: bool
+    default: False
+    version_added: 2.8
   state:
     description:
-    - Create (C(present)) or remove (C(absent)) a VMware vSphere cluster.
-    choices: [absent, present]
+    - Create C(present) or remove C(absent) a VMware vSphere cluster.
+    choices: [ absent, present ]
     default: present
 extends_documentation_fragment: vmware.documentation
 '''

-EXAMPLES = r'''
+EXAMPLES = r"""
 - name: Create Cluster
   vmware_cluster:
     hostname: '{{ vcenter_hostname }}'
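The HA VM monitoring options documented above work together: ha_vm_max_failures caps the number of automated resets that may happen inside the ha_vm_max_failure_window, and a window of -1 means the count is never aged out. A toy model of that documented throttling rule, purely for illustration and not VMware's implementation:

from typing import List


def reset_allowed(reset_times: List[float], now: float,
                  max_failures: int = 3, failure_window: int = -1) -> bool:
    # Model of the documented ha_vm_max_failures / ha_vm_max_failure_window
    # semantics: with failure_window == -1 every past reset counts forever.
    if failure_window == -1:
        recent = reset_times
    else:
        recent = [t for t in reset_times if now - t <= failure_window]
    return len(recent) < max_failures


# Two resets in the last 120 seconds, limit of 3: one more reset is allowed.
print(reset_allowed([10.0, 50.0], now=60.0, max_failures=3, failure_window=120))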
@@ -67,47 +187,73 @@ EXAMPLES = r'''
     enable_drs: yes
     enable_vsan: yes
   delegate_to: localhost
-'''
+
+- name: Create Cluster with additional changes
+  vmware_cluster:
+    hostname: "{{ vcenter_server }}"
+    username: "{{ vcenter_user }}"
+    password: "{{ vcenter_pass }}"
+    validate_certs: no
+    datacenter_name: DC0
+    cluster_name: "{{ cluster_name }}"
+    enable_ha: True
+    ha_vm_monitoring: vmMonitoringOnly
+    enable_drs: True
+    drs_default_vm_behavior: partiallyAutomated
+    enable_vsan: True
+  register: cl_result
+  delegate_to: localhost
+
+- name: Delete Cluster
+  vmware_cluster:
+    hostname: "{{ vcenter_server }}"
+    username: "{{ vcenter_user }}"
+    password: "{{ vcenter_pass }}"
+    datacenter_name: datacenter
+    cluster_name: cluster
+    enable_ha: yes
+    enable_drs: yes
+    enable_vsan: yes
+    state: absent
+"""
+
+RETURN = r"""#
+"""

 try:
     from pyVmomi import vim, vmodl
-    HAS_PYVMOMI = True
 except ImportError:
-    HAS_PYVMOMI = False
+    pass

 from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.vmware import (HAS_PYVMOMI,
-                                         TaskError,
-                                         connect_to_api,
-                                         find_cluster_by_name_datacenter,
-                                         find_datacenter_by_name,
-                                         vmware_argument_spec,
-                                         wait_for_task
-                                         )
+from ansible.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name,
+                                         vmware_argument_spec, wait_for_task)
+from ansible.module_utils._text import to_native


-class VMwareCluster(object):
+class VMwareCluster(PyVmomi):
     def __init__(self, module):
-        self.module = module
+        super(VMwareCluster, self).__init__(module)
         self.cluster_name = module.params['cluster_name']
-        self.datacenter_name = module.params['datacenter_name']
+        self.datacenter_name = module.params['datacenter']
         self.enable_drs = module.params['enable_drs']
         self.enable_ha = module.params['enable_ha']
         self.enable_vsan = module.params['enable_vsan']
         self.desired_state = module.params['state']
         self.datacenter = None
         self.cluster = None
-        self.content = connect_to_api(module)

     def process_state(self):
+        """
+        Manage internal states of cluster
+        """
         cluster_states = {
             'absent': {
                 'present': self.state_destroy_cluster,
                 'absent': self.state_exit_unchanged,
             },
             'present': {
-                'update': self.state_update_cluster,
-                'present': self.state_exit_unchanged,
+                'present': self.state_update_cluster,
                 'absent': self.state_create_cluster,
             }
         }
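The cluster_states table above dispatches on (desired state, current state); the refactor drops the old 'update' entry because state_update_cluster now does its own field-level diffing when the cluster is already present. A stripped-down illustration of the same dispatch-table pattern, with stand-in callables instead of the module's bound methods:

def process_state(desired_state, current_state, handlers):
    # handlers maps desired state -> current state -> callable, mirroring the
    # cluster_states table used by VMwareCluster.process_state() above.
    return handlers[desired_state][current_state]()


handlers = {
    'present': {
        'present': lambda: 'update existing cluster',
        'absent': lambda: 'create cluster',
    },
    'absent': {
        'present': lambda: 'destroy cluster',
        'absent': lambda: 'nothing to do',
    },
}

print(process_state('present', 'absent', handlers))  # -> create cluster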
@@ -117,27 +263,71 @@ class VMwareCluster(object):
         cluster_states[self.desired_state][current_state]()

     def configure_ha(self):
+        """
+        Manage HA Configuration
+        Returns: Cluster DAS configuration spec
+
+        """
         das_config = vim.cluster.DasConfigInfo()
         das_config.enabled = self.enable_ha
         das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
-        das_config.admissionControlPolicy.failoverLevel = 2
+        das_config.admissionControlPolicy.failoverLevel = self.params.get('ha_failover_level')
+
+        ha_vm_monitoring = self.params.get('ha_vm_monitoring')
+        das_vm_config = None
+        if ha_vm_monitoring in ['vmMonitoringOnly', 'vmAndAppMonitoring']:
+            vm_tool_spec = vim.cluster.VmToolsMonitoringSettings()
+            vm_tool_spec.enabled = True
+            vm_tool_spec.vmMonitoring = ha_vm_monitoring
+            vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval')
+            vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time')
+            vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures')
+            vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window')
+
+            das_vm_config = vim.cluster.DasVmSettings()
+            das_vm_config.restartPriority = self.params.get('ha_restart_priority')
+            das_vm_config.isolationResponse = None
+            das_vm_config.vmToolsMonitoringSettings = vm_tool_spec
+
+        das_config.admissionControlEnabled = self.params.get('ha_admission_control_enabled')
+
+        das_config.hostMonitoring = self.params.get('ha_host_monitoring')
+        das_config.vmMonitoring = ha_vm_monitoring
+        das_config.defaultVmSettings = das_vm_config
+
         return das_config

     def configure_drs(self):
+        """
+        Manage DRS configuration
+        Returns: Cluster DRS configuration spec
+
+        """
         drs_config = vim.cluster.DrsConfigInfo()

         drs_config.enabled = self.enable_drs
-        # Set to partially automated
-        drs_config.vmotionRate = 3
+        drs_config.enableVmBehaviorOverrides = self.params.get('drs_enable_vm_behavior_overrides')
+        drs_config.defaultVmBehavior = self.params.get('drs_default_vm_behavior')
+        drs_config.vmotionRate = self.params.get('drs_vmotion_rate')

         return drs_config

     def configure_vsan(self):
+        """
+        Manage VSAN configuration
+        Returns: Cluster VSAN configuration spec
+
+        """
         vsan_config = vim.vsan.cluster.ConfigInfo()
         vsan_config.enabled = self.enable_vsan
         vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
-        vsan_config.defaultConfig.autoClaimStorage = False
+        vsan_config.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage')
         return vsan_config

     def state_create_cluster(self):
+        """
+        Create cluster with given configuration
+        """
         try:
             cluster_config_spec = vim.cluster.ConfigSpecEx()
             cluster_config_spec.dasConfig = self.configure_ha()
@@ -148,23 +338,33 @@ class VMwareCluster(object):
                 self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
             self.module.exit_json(changed=True)
         except vim.fault.DuplicateName:
-            self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name)
-        except vmodl.fault.InvalidArgument:
-            self.module.fail_json(msg="Cluster configuration specification parameter is invalid")
-        except vim.fault.InvalidName:
-            self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name)
-        except vmodl.fault.NotSupported:
+            # To match other vmware_* modules
+            pass
+        except vmodl.fault.InvalidArgument as invalid_args:
+            self.module.fail_json(msg="Cluster configuration specification"
+                                      " parameter is invalid : %s" % to_native(invalid_args.msg))
+        except vim.fault.InvalidName as invalid_name:
+            self.module.fail_json(msg="'%s' is an invalid name for a"
+                                      " cluster : %s" % (self.cluster_name,
+                                                         to_native(invalid_name.msg)))
+        except vmodl.fault.NotSupported as not_supported:
             # This should never happen
-            self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object")
+            self.module.fail_json(msg="Trying to create a cluster on an incorrect"
+                                      " folder object : %s" % to_native(not_supported.msg))
         except vmodl.RuntimeFault as runtime_fault:
-            self.module.fail_json(msg=runtime_fault.msg)
+            self.module.fail_json(msg=to_native(runtime_fault.msg))
         except vmodl.MethodFault as method_fault:
             # This should never happen either
-            self.module.fail_json(msg=method_fault.msg)
+            self.module.fail_json(msg=to_native(method_fault.msg))
+        except Exception as generic_exc:
+            self.module.fail_json(msg="Failed to create cluster"
+                                      " due to generic exception %s" % to_native(generic_exc))

     def state_destroy_cluster(self):
-        changed = True
-        result = None
+        """
+        Destroy cluster
+        """
+        changed, result = False, None

         try:
             if not self.module.check_mode:
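Both the create path above and the update path in the next hunk work the same way: the three per-service specs from configure_ha(), configure_drs() and configure_vsan() are bundled into a single vim.cluster.ConfigSpecEx and applied with one vSphere call. A condensed sketch of that flow, with error handling and check mode omitted and the helper name chosen for illustration:

from pyVmomi import vim


def apply_cluster_config(datacenter, cluster, cluster_name, ha_spec, drs_spec, vsan_spec):
    # Bundle the per-service specs into one combined spec.
    spec = vim.cluster.ConfigSpecEx()
    spec.dasConfig = ha_spec
    spec.drsConfig = drs_spec
    spec.vsanConfig = vsan_spec

    if cluster is None:
        # Create path: CreateClusterEx takes the new cluster name and the spec.
        return datacenter.hostFolder.CreateClusterEx(cluster_name, spec)
    # Update path: True selects incremental modification, so only the fields
    # actually set in the spec are changed on the existing cluster.
    return cluster.ReconfigureComputeResource_Task(spec, True)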
@@ -172,79 +372,167 @@ class VMwareCluster(object):
                 changed, result = wait_for_task(task)
             self.module.exit_json(changed=changed, result=result)
         except vim.fault.VimFault as vim_fault:
-            self.module.fail_json(msg=vim_fault.msg)
+            self.module.fail_json(msg=to_native(vim_fault.msg))
         except vmodl.RuntimeFault as runtime_fault:
-            self.module.fail_json(msg=runtime_fault.msg)
+            self.module.fail_json(msg=to_native(runtime_fault.msg))
         except vmodl.MethodFault as method_fault:
-            self.module.fail_json(msg=method_fault.msg)
+            self.module.fail_json(msg=to_native(method_fault.msg))
+        except Exception as generic_exc:
+            self.module.fail_json(msg="Failed to destroy cluster"
+                                      " due to generic exception %s" % to_native(generic_exc))

     def state_exit_unchanged(self):
+        """
+        Exit without any change
+        """
         self.module.exit_json(changed=False)

     def state_update_cluster(self):
+        """
+        Update cluster configuration of existing cluster
+        """
+        changed, result = False, None
         cluster_config_spec = vim.cluster.ConfigSpecEx()
-        changed = True
-        result = None
-
-        if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha:
+        diff = False  # Triggers Reconfigure Task only when there is a change
+
+        if self.check_ha_config_diff():
             cluster_config_spec.dasConfig = self.configure_ha()
-        if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs:
+            diff = True
+
+        if self.check_drs_config_diff():
             cluster_config_spec.drsConfig = self.configure_drs()
-        if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan:
+            diff = True
+
+        if self.check_vsan_config_diff():
             cluster_config_spec.vsanConfig = self.configure_vsan()
+            diff = True

         try:
-            if not self.module.check_mode:
+            if not self.module.check_mode and diff:
                 task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                 changed, result = wait_for_task(task)
             self.module.exit_json(changed=changed, result=result)
         except vmodl.RuntimeFault as runtime_fault:
-            self.module.fail_json(msg=runtime_fault.msg)
+            self.module.fail_json(msg=to_native(runtime_fault.msg))
         except vmodl.MethodFault as method_fault:
-            self.module.fail_json(msg=method_fault.msg)
+            self.module.fail_json(msg=to_native(method_fault.msg))
         except TaskError as task_e:
-            self.module.fail_json(msg=str(task_e))
+            self.module.fail_json(msg=to_native(task_e))
+        except Exception as generic_exc:
+            self.module.fail_json(msg="Failed to update cluster"
+                                      " due to generic exception %s" % to_native(generic_exc))
+
+    def check_ha_config_diff(self):
+        """
+        Check HA configuration diff
+        Returns: True if there is diff, else False
+
+        """
+        das_config = self.cluster.configurationEx.dasConfig
+        if das_config.enabled != self.enable_ha or \
+                das_config.admissionControlPolicy.failoverLevel != self.params.get('ha_failover_level') or \
+                das_config.vmMonitoring != self.params.get('ha_vm_monitoring') or \
+                das_config.hostMonitoring != self.params.get('ha_host_monitoring') or \
+                das_config.admissionControlPolicy.failoverLevel != self.params.get('ha_failover_level') or \
+                das_config.admissionControlEnabled != self.params.get('ha_admission_control_enabled') or \
+                das_config.defaultVmSettings.restartPriority != self.params.get('ha_restart_priority') or \
+                das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring != self.params.get('ha_vm_monitoring') or \
+                das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval != self.params.get('ha_vm_failure_interval') or \
+                das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime != self.params.get('ha_vm_min_up_time') or \
+                das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures != self.params.get('ha_vm_max_failures') or \
+                das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow != self.params.get('ha_vm_max_failure_window'):
+            return True
+        return False
+
+    def check_drs_config_diff(self):
+        """
+        Check DRS configuration diff
+        Returns: True if there is diff, else False
+
+        """
+        drs_config = self.cluster.configurationEx.drsConfig
+
+        if drs_config.enabled != self.enable_drs or \
+                drs_config.enableVmBehaviorOverrides != self.params.get('drs_enable_vm_behavior_overrides') or \
+                drs_config.defaultVmBehavior != self.params.get('drs_default_vm_behavior') or \
+                drs_config.vmotionRate != self.params.get('drs_vmotion_rate'):
+            return True
+        return False
+
+    def check_vsan_config_diff(self):
+        """
+        Check VSAN configuration diff
+        Returns: True if there is diff, else False
+
+        """
+        vsan_config = self.cluster.configurationEx.vsanConfigInfo
+
+        if vsan_config.enabled != self.enable_vsan or \
+                vsan_config.defaultConfig.autoClaimStorage != self.params.get('vsan_auto_claim_storage'):
+            return True
+        return False

     def check_cluster_configuration(self):
+        """
+        Check cluster configuration
+        Returns: 'Present' if cluster exists, else 'absent'
+
+        """
         try:
             self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
             if self.datacenter is None:
-                self.module.fail_json(msg="Datacenter %s does not exist, "
-                                          "please create first with Ansible Module vmware_datacenter or manually."
-                                          % self.datacenter_name)
-            self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name)
+                self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+            self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name)

             if self.cluster is None:
                 return 'absent'
-            else:
-                desired_state = (self.enable_ha,
-                                 self.enable_drs,
-                                 self.enable_vsan)
-
-                current_state = (self.cluster.configurationEx.dasConfig.enabled,
-                                 self.cluster.configurationEx.drsConfig.enabled,
-                                 self.cluster.configurationEx.vsanConfigInfo.enabled)
-
-                if desired_state != current_state:
-                    return 'update'
-                else:
-                    return 'present'
+
+            return 'present'
         except vmodl.RuntimeFault as runtime_fault:
-            self.module.fail_json(msg=runtime_fault.msg)
+            self.module.fail_json(msg=to_native(runtime_fault.msg))
         except vmodl.MethodFault as method_fault:
-            self.module.fail_json(msg=method_fault.msg)
+            self.module.fail_json(msg=to_native(method_fault.msg))
+        except Exception as generic_exc:
+            self.module.fail_json(msg="Failed to check configuration"
+                                      " due to generic exception %s" % to_native(generic_exc))


 def main():
     argument_spec = vmware_argument_spec()
     argument_spec.update(dict(
         cluster_name=dict(type='str', required=True),
-        datacenter_name=dict(type='str', required=True),
+        datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+        state=dict(type='str',
+                   default='present',
+                   choices=['absent', 'present']),
+        # DRS
         enable_drs=dict(type='bool', default=False),
+        drs_enable_vm_behavior_overrides=dict(type='bool', default=True),
+        drs_default_vm_behavior=dict(type='str',
+                                     choices=['fullyAutomated', 'manual', 'partiallyAutomated'],
+                                     default='fullyAutomated'),
+        drs_vmotion_rate=dict(type='int',
+                              choices=range(1, 6),
+                              default=3),
+        # HA
         enable_ha=dict(type='bool', default=False),
+        ha_failover_level=dict(type='int', default=2),
+        ha_host_monitoring=dict(type='str',
+                                default='enabled',
+                                choices=['enabled', 'disabled']),
+        # HA VM Monitoring related parameters
+        ha_vm_monitoring=dict(type='str',
+                              choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'],
+                              default='vmMonitoringDisabled'),
+        ha_vm_failure_interval=dict(type='int', default=30),
+        ha_vm_min_up_time=dict(type='int', default=120),
+        ha_vm_max_failures=dict(type='int', default=3),
+        ha_vm_max_failure_window=dict(type='int', default=-1),
+
+        ha_restart_priority=dict(type='str',
+                                 choices=['high', 'low', 'medium', 'disabled'],
+                                 default='medium'),
+        ha_admission_control_enabled=dict(type='bool', default=True),
+        # VSAN
         enable_vsan=dict(type='bool', default=False),
-        state=dict(type='str', default='present', choices=['absent', 'present']),
+        vsan_auto_claim_storage=dict(type='bool', default=False),
     ))

     module = AnsibleModule(
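The new check_ha_config_diff, check_drs_config_diff and check_vsan_config_diff helpers are what make the module idempotent: each one compares the live cluster configuration field by field against the requested parameters, and the reconfigure task is only issued when something actually differs. The same comparison expressed as a generic, data-driven helper (illustrative only, not part of the module):

def config_differs(live_obj, desired, attribute_map):
    # attribute_map maps dotted attribute paths on the live pyVmomi config
    # object to the corresponding Ansible parameter names.
    for attr_path, param_name in attribute_map.items():
        value = live_obj
        for part in attr_path.split('.'):
            value = getattr(value, part)
        if value != desired.get(param_name):
            return True
    return False


# The DRS comparison from check_drs_config_diff, expressed as data.
drs_attribute_map = {
    'enabled': 'enable_drs',
    'enableVmBehaviorOverrides': 'drs_enable_vm_behavior_overrides',
    'defaultVmBehavior': 'drs_default_vm_behavior',
    'vmotionRate': 'drs_vmotion_rate',
}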
@@ -252,9 +540,6 @@ def main():
         supports_check_mode=True,
     )

-    if not HAS_PYVMOMI:
-        module.fail_json(msg='pyvmomi is required for this module')
-
     vmware_cluster = VMwareCluster(module)
     vmware_cluster.process_state()
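The explicit HAS_PYVMOMI guard can be dropped from main() because VMwareCluster now inherits from the shared PyVmomi base class in ansible.module_utils.vmware, which handles the dependency check when the object is constructed. A rough sketch of that responsibility, assuming (not quoting) what the shared base class does:

try:
    from pyVmomi import vim, vmodl  # noqa: F401
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False


class PyVmomiSketch(object):
    # Sketch of what the shared PyVmomi base class takes over from main():
    # fail early when pyVmomi is missing and expose module.params as
    # self.params, so subclasses like VMwareCluster can use self.params.get().
    def __init__(self, module):
        if not HAS_PYVMOMI:
            module.fail_json(msg='PyVmomi is required for this module')
        self.module = module
        self.params = module.params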