mirror of
https://github.com/ansible-collections/community.general.git
synced 2024-09-14 20:13:21 +02:00
New options of na_ontap_aggregate (#48906)
* add new options for na_ontap_aggregate * add gpl line * remove dup option * Put files in wrong directory * change unit test to match the request from PR 48941 * Changed for review comments * pep8
This commit is contained in:
parent
34c57b4c42
commit
62dd1fe29e
2 changed files with 279 additions and 13 deletions
|
@ -15,7 +15,6 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
|
|||
DOCUMENTATION = '''
|
||||
|
||||
module: na_ontap_aggregate
|
||||
|
||||
short_description: NetApp ONTAP manage aggregates.
|
||||
extends_documentation_fragment:
|
||||
- netapp.na_ontap
|
||||
|
@ -80,7 +79,7 @@ options:
|
|||
raid_type:
|
||||
description:
|
||||
- Specifies the type of RAID groups to use in the new aggregate.
|
||||
- The default value is raid4 on most platforms.
|
||||
choices: ['raid4', 'raid_dp', 'raid_tec']
|
||||
version_added: '2.7'
|
||||
|
||||
unmount_volumes:
|
||||
|
@ -90,6 +89,34 @@ options:
|
|||
- before the offline operation is executed.
|
||||
- By default, the system will reject any attempt to offline an aggregate that hosts one or more online volumes.
|
||||
|
||||
disks:
|
||||
type: list
|
||||
description:
|
||||
- Specific list of disks to use for the new aggregate.
|
||||
- To create a "mirrored" aggregate with a specific list of disks, both 'disks' and 'mirror_disks' options must be supplied.
|
||||
Additionally, the same number of disks must be supplied in both lists.
|
||||
version_added: '2.8'
|
||||
|
||||
is_mirrored:
|
||||
type: bool
|
||||
description:
|
||||
- Specifies that the new aggregate be mirrored (have two plexes).
|
||||
- If set to true, then the indicated disks will be split across the two plexes. By default, the new aggregate will not be mirrored.
|
||||
- This option cannot be used when a specific list of disks is supplied with either the 'disks' or 'mirror_disks' options.
|
||||
version_added: '2.8'
|
||||
|
||||
mirror_disks:
|
||||
type: list
|
||||
description:
|
||||
- List of mirror disks to use. It must contain the same number of disks specified in 'disks'.
|
||||
version_added: '2.8'
|
||||
|
||||
spare_pool:
|
||||
description:
|
||||
- Specifies the spare pool from which to select spare disks to use in creation of a new aggregate.
|
||||
choices: ['Pool0', 'Pool1']
|
||||
version_added: '2.8'
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
|
@ -118,8 +145,8 @@ EXAMPLES = """
|
|||
na_ontap_aggregate:
|
||||
state: present
|
||||
service_state: online
|
||||
name: ansibleAggr
|
||||
rename: ansibleAggr2
|
||||
from_name: ansibleAggr
|
||||
name: ansibleAggr2
|
||||
disk_count: 1
|
||||
hostname: "{{ netapp_hostname }}"
|
||||
username: "{{ netapp_username }}"
|
||||
|
@ -155,32 +182,41 @@ class NetAppOntapAggregate(object):
|
|||
def __init__(self):
|
||||
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
|
||||
self.argument_spec.update(dict(
|
||||
state=dict(required=False, choices=[
|
||||
'present', 'absent'], default='present'),
|
||||
service_state=dict(required=False, choices=['online', 'offline']),
|
||||
name=dict(required=True, type='str'),
|
||||
from_name=dict(required=False, type='str'),
|
||||
disks=dict(required=False, type='list'),
|
||||
disk_count=dict(required=False, type='int', default=None),
|
||||
disk_type=dict(required=False, choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD',
|
||||
'VMDISK']),
|
||||
raid_type=dict(required=False, type='str'),
|
||||
disk_size=dict(required=False, type='int'),
|
||||
disk_type=dict(required=False, choices=['ATA', 'BSAS', 'FCAL', 'FSAS', 'LUN', 'MSATA', 'SAS', 'SSD', 'VMDISK']),
|
||||
from_name=dict(required=False, type='str'),
|
||||
mirror_disks=dict(required=False, type='list'),
|
||||
nodes=dict(required=False, type='list'),
|
||||
is_mirrored=dict(required=False, type='bool'),
|
||||
raid_size=dict(required=False, type='int'),
|
||||
raid_type=dict(required=False, choices=['raid4', 'raid_dp', 'raid_tec']),
|
||||
service_state=dict(required=False, choices=['online', 'offline']),
|
||||
spare_pool=dict(required=False, choices=['Pool0', 'Pool1']),
|
||||
state=dict(required=False, choices=['present', 'absent'], default='present'),
|
||||
unmount_volumes=dict(required=False, type='bool'),
|
||||
))
|
||||
|
||||
self.module = AnsibleModule(
|
||||
argument_spec=self.argument_spec,
|
||||
required_if=[
|
||||
('service_state', 'offline', ['unmount_volumes'])
|
||||
('service_state', 'offline', ['unmount_volumes']),
|
||||
],
|
||||
mutually_exclusive=[
|
||||
('is_mirrored', 'disks'),
|
||||
('is_mirrored', 'mirror_disks'),
|
||||
('is_mirrored', 'spare_pool'),
|
||||
('spare_pool', 'disks')
|
||||
],
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
self.na_helper = NetAppModule()
|
||||
self.parameters = self.na_helper.set_parameters(self.module.params)
|
||||
|
||||
if self.parameters.get('mirror_disks') is not None and self.parameters.get('disks') is None:
|
||||
self.module.fail_json(mgs="mirror_disks require disks options to be set")
|
||||
if HAS_NETAPP_LIB is False:
|
||||
self.module.fail_json(msg="the python NetApp-Lib module is required")
|
||||
else:
|
||||
|
@ -279,12 +315,28 @@ class NetAppOntapAggregate(object):
|
|||
options['raid-type'] = self.parameters['raid_type']
|
||||
if self.parameters.get('disk_size'):
|
||||
options['disk-size'] = str(self.parameters['disk_size'])
|
||||
if self.parameters.get('is_mirrored'):
|
||||
options['is-mirrored'] = str(self.parameters['is_mirrored'])
|
||||
if self.parameters.get('spare_pool'):
|
||||
options['spare-pool'] = self.parameters['spare_pool']
|
||||
if self.parameters.get('raid_type'):
|
||||
options['raid-type'] = self.parameters['raid_type']
|
||||
aggr_create = netapp_utils.zapi.NaElement.create_node_with_children('aggr-create', **options)
|
||||
if self.parameters.get('nodes'):
|
||||
nodes_obj = netapp_utils.zapi.NaElement('nodes')
|
||||
aggr_create.add_child_elem(nodes_obj)
|
||||
for node in self.parameters['nodes']:
|
||||
nodes_obj.add_new_child('node-name', node)
|
||||
if self.parameters.get('disks'):
|
||||
disks_obj = netapp_utils.zapi.NaElement('disk-info')
|
||||
for disk in self.parameters.get('disks'):
|
||||
disks_obj.add_new_child('name', disk)
|
||||
aggr_create.add_child_elem(disks_obj)
|
||||
if self.parameters.get('mirror_disks'):
|
||||
mirror_disks_obj = netapp_utils.zapi.NaElement('disk-info')
|
||||
for disk in self.parameters.get('mirror_disks'):
|
||||
mirror_disks_obj.add_new_child('name', disk)
|
||||
aggr_create.add_child_elem(mirror_disks_obj)
|
||||
|
||||
try:
|
||||
self.server.invoke_successfully(aggr_create, enable_tunneling=False)
|
||||
|
|
214
test/units/modules/storage/netapp/test_na_ontap_aggregate.py
Normal file
214
test/units/modules/storage/netapp/test_na_ontap_aggregate.py
Normal file
|
@ -0,0 +1,214 @@
|
|||
""" unit tests for Ansible module: na_ontap_aggregate """
|
||||
|
||||
from __future__ import print_function
|
||||
import json
|
||||
import pytest
|
||||
|
||||
from units.compat import unittest
|
||||
from units.compat.mock import patch, Mock
|
||||
from ansible.module_utils import basic
|
||||
from ansible.module_utils._text import to_bytes
|
||||
import ansible.module_utils.netapp as netapp_utils
|
||||
|
||||
from ansible.modules.storage.netapp.na_ontap_aggregate \
|
||||
import NetAppOntapAggregate as my_module # module under test
|
||||
|
||||
if not netapp_utils.has_netapp_lib():
|
||||
pytestmark = pytest.skip('skipping as missing required netapp_lib')
|
||||
|
||||
|
||||
def set_module_args(args):
    """Serialize *args* and plant them where AnsibleModule argument parsing will find them."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
|
||||
|
||||
|
||||
class AnsibleExitJson(Exception):
    """Raised by the patched module.exit_json so tests can intercept a successful exit."""
|
||||
|
||||
|
||||
class AnsibleFailJson(Exception):
    """Raised by the patched module.fail_json so tests can intercept a failure."""
|
||||
|
||||
|
||||
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.exit_json; packages the return payload into AnsibleExitJson."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
|
||||
|
||||
|
||||
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.fail_json; packages the failure payload into AnsibleFailJson."""
    kwargs.update(failed=True)
    raise AnsibleFailJson(kwargs)
|
||||
|
||||
|
||||
class MockONTAPConnection(object):
    """Stand-in for a ZAPI server connection to an ONTAP host."""

    def __init__(self, kind=None, parm1=None, parm2=None):
        """Record which canned behaviour this connection should exhibit."""
        self.type = kind      # selects the branch taken in invoke_successfully
        self.parm1 = parm1    # vserver name handed to build_aggregate_info
        self.parm2 = parm2    # aggregate name handed to build_aggregate_info
        self.xml_in = None    # last request received
        self.xml_out = None   # last response returned

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        """Mock invoke_successfully: fail, fabricate aggregate info, or echo the request."""
        self.xml_in = xml
        if self.type == 'aggregate_fail':
            raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
        if self.type == 'aggregate':
            xml = self.build_aggregate_info(self.parm1, self.parm2)
        self.xml_out = xml
        return xml

    @staticmethod
    def build_aggregate_info(vserver, aggregate):
        """Build canned XML describing one offline aggregate plus its vserver info."""
        response = netapp_utils.zapi.NaElement('xml')
        payload = {
            'num-records': 2,
            'attributes-list': {
                'aggr-attributes': {
                    'aggregate-name': aggregate,
                    'aggr-raid-attributes': {
                        'state': 'offline'
                    },
                },
            },
            'vserver-info': {
                'vserver-name': vserver
            }
        }
        response.translate_struct(payload)
        print(response.to_string())  # debugging aid kept from the original
        return response
|
||||
|
||||
|
||||
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # Patch exit_json/fail_json on AnsibleModule so module exits surface
        # as AnsibleExitJson/AnsibleFailJson exceptions instead of sys.exit.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection('aggregate', '12', 'name')
        # whether to use a mock or a simulator
        self.onbox = False

    def set_default_args(self):
        """Return the minimum required module arguments (real box vs. mock values)."""
        if self.onbox:
            hostname = '10.193.74.78'
            username = 'admin'
            password = 'netapp1!'
            name = 'name'
        else:
            hostname = 'hostname'
            username = 'username'
            password = 'password'
            name = 'name'
        return dict({
            'hostname': hostname,
            'username': username,
            'password': password,
            'name': name
        })

    def call_command(self, module_args):
        ''' utility function to call apply '''
        # Merge in required args, build the module object, silence ASUP logging,
        # swap in the mock connection, then run apply() and report 'changed'.
        module_args.update(self.set_default_args())
        set_module_args(module_args)
        my_obj = my_module()
        my_obj.asup_log_for_cserver = Mock(return_value=None)
        if not self.onbox:
            # mock the connection
            my_obj.server = MockONTAPConnection('aggregate', '12', 'test_name')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        return exc.value.args[0]['changed']

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            my_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    def test_is_mirrored(self):
        """Creating with is_mirrored against an existing aggregate reports no change."""
        module_args = {
            'disk_count': '2',
            'is_mirrored': 'true',
        }
        changed = self.call_command(module_args)
        assert not changed

    def test_disks_list(self):
        """Creating with an explicit disks list against an existing aggregate reports no change."""
        module_args = {
            'disk_count': '2',
            'disks': ['1', '2'],
        }
        changed = self.call_command(module_args)
        assert not changed

    def test_mirror_disks(self):
        """Creating with disks plus mirror_disks against an existing aggregate reports no change."""
        module_args = {
            'disk_count': '2',
            'disks': ['1', '2'],
            'mirror_disks': ['3', '4']
        }
        changed = self.call_command(module_args)
        assert not changed

    def test_spare_pool(self):
        """Creating with a spare_pool selection against an existing aggregate reports no change."""
        module_args = {
            'disk_count': '2',
            'spare_pool': 'Pool1'
        }
        changed = self.call_command(module_args)
        assert not changed

    def test_rename(self):
        """Rename via from_name when the target name already exists reports no change."""
        module_args = {
            'from_name': 'test_name2'
        }
        changed = self.call_command(module_args)
        assert not changed

    def test_if_all_methods_catch_exception(self):
        """Each ZAPI-calling method must convert NaApiError into fail_json with a clear message."""
        module_args = {}
        module_args.update(self.set_default_args())
        module_args.update({'service_state': 'online'})
        module_args.update({'unmount_volumes': 'True'})
        module_args.update({'from_name': 'test_name2'})
        set_module_args(module_args)
        my_obj = my_module()
        if not self.onbox:
            # 'aggregate_fail' makes every invoke_successfully raise NaApiError
            my_obj.server = MockONTAPConnection('aggregate_fail')
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.aggr_get_iter(module_args.get('name'))
        # NOTE(review): '' in msg is always true — this only checks that fail_json
        # was called; the expected message text should be asserted here. TODO confirm.
        assert '' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.aggregate_online()
        assert 'Error changing the state of aggregate' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.aggregate_offline()
        assert 'Error changing the state of aggregate' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_aggr()
        assert 'Error provisioning aggregate' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.delete_aggr()
        assert 'Error removing aggregate' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.rename_aggregate()
        assert 'Error renaming aggregate' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.asup_log_for_cserver = Mock(return_value=None)
            my_obj.apply()
        assert 'Error renaming: aggregate test_name2 does not exist' in exc.value.args[0]['msg']
|
Loading…
Reference in a new issue