
Fix netapp modules (#76)

* Replace missing netapp parts with own copy.

* Localize final fragment.

* Mark netapps docs fragment as deprecated.

* Drop dependency on netapp.ontap.

* Remove all netapp_e_* modules.

* Remove docs fragment.
Felix Fontein 2020-03-31 09:41:29 +02:00 committed by GitHub
parent 50f8477dbc
commit 6172e56b62
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
125 changed files with 829 additions and 20206 deletions
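
In the surviving cDOT/ONTAP modules, the dropped dependency shows up as a one-line import swap, repeated file by file in the diffs below:

import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils        # before
import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils  # after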

@@ -10,7 +10,6 @@ license_file: COPYING
tags: null
# NOTE: No more dependencies can be added to this list
#dependencies:
-#  netapp.ontap: '>=0.1.0'
#  community.kubernetes: '>=0.1.0'
#  ovirt.ovirt_collection: '>=0.1.0'
#  ansible.netcommon: '>=0.1.0'

@@ -74,41 +74,65 @@ notes:
'''
# Documentation fragment for E-Series
ESERIES = r'''
# Documentation fragment for ONTAP (na_ontap)
NA_ONTAP = r'''
options:
api_username:
required: true
type: str
description:
- The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
api_password:
required: true
type: str
description:
- The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
api_url:
required: true
type: str
description:
- The url to the SANtricity Web Services Proxy or Embedded Web Services API.
Example https://prod-1.wahoo.acme.com/devmgr/v2
hostname:
description:
- The hostname or IP address of the ONTAP instance.
type: str
required: true
username:
description:
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
type: str
required: true
aliases: [ user ]
password:
description:
- Password for the specified user.
type: str
required: true
aliases: [ pass ]
https:
description:
- Enable or disable https.
type: bool
default: no
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
required: false
type: str
default: 1
description:
- The ID of the array to manage. This value must be unique for each array.
description:
- If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) on personally controlled sites using self-signed certificates.
type: bool
default: yes
http_port:
description:
- Override the default port (80 or 443) with this port
type: int
ontapi:
description:
- The ONTAP API version to use.
type: int
use_rest:
description:
- Whether to use the REST API. REST is used if the target system supports it for all the resources and attributes the module requires; otherwise the module reverts to ZAPI.
- Always -- will always use the REST API.
- Never -- will always use the ZAPI.
- Auto -- will try to use the REST API.
default: Auto
choices: ['Never', 'Always', 'Auto']
type: str
requirements:
- A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
- Ansible 2.6
- Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
- Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
- To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
notes:
- The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
- Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
- M(netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
- The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
'''
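
For orientation, a module consumes the localized fragment from its DOCUMENTATION string; a minimal sketch (the module name here is hypothetical):

DOCUMENTATION = '''
module: na_cdot_example
short_description: Example consumer of the localized doc fragment
extends_documentation_fragment:
- community.general._netapp.na_ontap
'''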

@@ -0,0 +1,744 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import random
import mimetypes
from pprint import pformat
from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils._text import to_native
try:
from ansible.module_utils.ansible_release import __version__ as ansible_version
except ImportError:
ansible_version = 'unknown'
try:
from netapp_lib.api.zapi import zapi
HAS_NETAPP_LIB = True
except ImportError:
HAS_NETAPP_LIB = False
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
import ssl
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
HAS_SF_SDK = False
SF_BYTE_MAP = dict(
# Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
bytes=1,
b=1,
kb=1000,
mb=1000 ** 2,
gb=1000 ** 3,
tb=1000 ** 4,
pb=1000 ** 5,
eb=1000 ** 6,
zb=1000 ** 7,
yb=1000 ** 8
)
POW2_BYTE_MAP = dict(
# Here, 1 kb = 1024
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
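# Illustration (not part of the original file): for a size of 10 with unit 'gb',
# SF_BYTE_MAP yields 10 * 1000**3 == 10000000000 bytes (decimal, as the GUI shows),
# while POW2_BYTE_MAP yields 10 * 1024**3 == 10737418240 bytes (binary).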
try:
from solidfire.factory import ElementFactory
from solidfire.custom.models import TimeIntervalFrequency
from solidfire.models import Schedule, ScheduleInfo
HAS_SF_SDK = True
except Exception:
HAS_SF_SDK = False
def has_netapp_lib():
return HAS_NETAPP_LIB
def has_sf_sdk():
return HAS_SF_SDK
def na_ontap_host_argument_spec():
return dict(
hostname=dict(required=True, type='str'),
username=dict(required=True, type='str', aliases=['user']),
password=dict(required=True, type='str', aliases=['pass'], no_log=True),
https=dict(required=False, type='bool', default=False),
validate_certs=dict(required=False, type='bool', default=True),
http_port=dict(required=False, type='int'),
ontapi=dict(required=False, type='int'),
use_rest=dict(required=False, type='str', default='Auto', choices=['Never', 'Always', 'Auto'])
)
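# Illustration (hypothetical, not part of the original file): a module typically
# extends this base spec with its own options before building the AnsibleModule:
#
#   spec = na_ontap_host_argument_spec()
#   spec.update(dict(state=dict(type='str', choices=['present', 'absent'], default='present')))
#   module = AnsibleModule(argument_spec=spec, supports_check_mode=True)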
def ontap_sf_host_argument_spec():
return dict(
hostname=dict(required=True, type='str'),
username=dict(required=True, type='str', aliases=['user']),
password=dict(required=True, type='str', aliases=['pass'], no_log=True)
)
def aws_cvs_host_argument_spec():
return dict(
api_url=dict(required=True, type='str'),
validate_certs=dict(required=False, type='bool', default=True),
api_key=dict(required=True, type='str'),
secret_key=dict(required=True, type='str')
)
def create_sf_connection(module, port=None):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
if HAS_SF_SDK and hostname and username and password:
try:
return_val = ElementFactory.create(hostname, username, password, port=port)
return return_val
except Exception:
raise Exception("Unable to create SF connection")
else:
module.fail_json(msg="the python SolidFire SDK module is required")
def setup_na_ontap_zapi(module, vserver=None):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
https = module.params['https']
validate_certs = module.params['validate_certs']
port = module.params['http_port']
version = module.params['ontapi']
if HAS_NETAPP_LIB:
# set up zapi
server = zapi.NaServer(hostname)
server.set_username(username)
server.set_password(password)
if vserver:
server.set_vserver(vserver)
if version:
minor = version
else:
minor = 110
server.set_api_version(major=1, minor=minor)
# default is HTTP
if https:
if port is None:
port = 443
transport_type = 'HTTPS'
# HACK to bypass certificate verification
if validate_certs is False:
if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
ssl._create_default_https_context = ssl._create_unverified_context
else:
if port is None:
port = 80
transport_type = 'HTTP'
server.set_transport_type(transport_type)
server.set_port(port)
server.set_server_type('FILER')
return server
else:
module.fail_json(msg="the python NetApp-Lib module is required")
def setup_ontap_zapi(module, vserver=None):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
if HAS_NETAPP_LIB:
# set up zapi
server = zapi.NaServer(hostname)
server.set_username(username)
server.set_password(password)
if vserver:
server.set_vserver(vserver)
# Todo : Replace hard-coded values with configurable parameters.
server.set_api_version(major=1, minor=110)
server.set_port(80)
server.set_server_type('FILER')
server.set_transport_type('HTTP')
return server
else:
module.fail_json(msg="the python NetApp-Lib module is required")
def eseries_host_argument_spec():
"""Retrieve a base argument specification common to all NetApp E-Series modules"""
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
ssid=dict(type='str', required=False, default='1'),
validate_certs=dict(type='bool', required=False, default=True)
))
return argument_spec
class NetAppESeriesModule(object):
"""Base class for all NetApp E-Series modules.
Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
verification, http requests, secure http redirection for embedded web services, and logging setup.
Be sure to add the following lines in the module's documentation section:
extends_documentation_fragment:
- netapp.eseries
:param dict(dict) ansible_options: dictionary of ansible option definitions
:param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
:param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
:param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
:param list(list) required_if: list containing list(s) containing the option, the option value, and then
a list of required options. (optional)
:param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
:param list(list) required_together: list containing list(s) of options that are required together. (optional)
:param bool log_requests: controls whether to log each request (default: True)
"""
DEFAULT_TIMEOUT = 60
DEFAULT_SECURE_PORT = "8443"
DEFAULT_REST_API_PATH = "devmgr/v2/"
DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
"netapp-client-type": "Ansible-%s" % ansible_version}
HTTP_AGENT = "Ansible / %s" % ansible_version
SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)
def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
log_requests=True):
argument_spec = eseries_host_argument_spec()
argument_spec.update(ansible_options)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive, required_if=required_if,
required_one_of=required_one_of, required_together=required_together)
args = self.module.params
self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
self.ssid = args["ssid"]
self.url = args["api_url"]
self.log_requests = log_requests
self.creds = dict(url_username=args["api_username"],
url_password=args["api_password"],
validate_certs=args["validate_certs"])
if not self.url.endswith("/"):
self.url += "/"
self.is_embedded_mode = None
self.is_web_services_valid_cache = None
def _check_web_services_version(self):
"""Verify proxy or embedded web services meets minimum version required for module.
The minimum required web services version is evaluated against version supplied through the web services rest
api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded.
This helper function will update the supplied api url if secure http is not used for embedded web services
:raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
"""
if not self.is_web_services_valid_cache:
url_parts = urlparse(self.url)
if not url_parts.scheme or not url_parts.netloc:
self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
if url_parts.scheme not in ["http", "https"]:
self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)
if rc != 200:
self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
try:
rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(error)))
major, minor, other, revision = [int(v) for v in data["version"].split(".")]
minimum_major, minimum_minor, other, minimum_revision = [int(v) for v in self.web_services_version.split(".")]
if not (major > minimum_major or
(major == minimum_major and minor > minimum_minor) or
(major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
" Version required: [%s]." % (data["version"], self.web_services_version))
self.module.log("Web services rest api version met the minimum required version.")
self.is_web_services_valid_cache = True
def is_embedded(self):
"""Determine whether web services server is the embedded web services.
If web services about endpoint fails based on an URLError then the request will be attempted again using
secure http.
:raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
:return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
"""
self._check_web_services_version()
if self.is_embedded_mode is None:
about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
try:
rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
self.is_embedded_mode = not data["runningAsProxy"]
except Exception as error:
self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(error)))
return self.is_embedded_mode
def request(self, path, data=None, method='GET', headers=None, ignore_errors=False):
"""Issue an HTTP request to a url, retrieving an optional JSON response.
:param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
:param data: data required for the request (data may be json or any python structured data)
:param str method: request method such as GET, POST, DELETE.
:param dict headers: dictionary containing request headers.
:param bool ignore_errors: forces the request to ignore any raised exceptions.
"""
self._check_web_services_version()
if headers is None:
headers = self.DEFAULT_HEADERS
if not isinstance(data, str) and headers["Content-Type"] == "application/json":
data = json.dumps(data)
if path.startswith("/"):
path = path[1:]
request_url = self.url + self.DEFAULT_REST_API_PATH + path
if self.log_requests:
self.module.log(pformat(dict(url=request_url, data=data, method=method)))
return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
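# Illustration (hypothetical, not part of the original file): a minimal module built
# on NetAppESeriesModule; the option name is an example, the graph endpoint appears above.
#
#   class NetAppESeriesExample(NetAppESeriesModule):
#       def __init__(self):
#           options = dict(name=dict(type='str', required=True))
#           super(NetAppESeriesExample, self).__init__(ansible_options=options,
#                                                      web_services_version="02.00.0000.0000",
#                                                      supports_check_mode=True)
#
#       def get_graph(self):
#           rc, graph = self.request("storage-systems/%s/graph" % self.ssid)
#           return graph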
def create_multipart_formdata(files, fields=None, send_8kb=False):
"""Create the data for a multipart/form request.
:param list(list) files: list of lists each containing (name, filename, path).
:param list(list) fields: list of lists each containing (key, value).
:param bool send_8kb: only sends the first 8kb of the files (default: False).
"""
boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
data_parts = list()
data = None
if six.PY2: # Generate payload for Python 2
newline = "\r\n"
if fields is not None:
for key, value in fields:
data_parts.extend(["--%s" % boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
value])
for name, filename, path in files:
with open(path, "rb") as fh:
value = fh.read(8192) if send_8kb else fh.read()
data_parts.extend(["--%s" % boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
"Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
"",
value])
data_parts.extend(["--%s--" % boundary, ""])
data = newline.join(data_parts)
else:
newline = six.b("\r\n")
if fields is not None:
for key, value in fields:
data_parts.extend([six.b("--%s" % boundary),
six.b('Content-Disposition: form-data; name="%s"' % key),
six.b(""),
six.b(value)])
for name, filename, path in files:
with open(path, "rb") as fh:
value = fh.read(8192) if send_8kb else fh.read()
data_parts.extend([six.b("--%s" % boundary),
six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
six.b(""),
value])
data_parts.extend([six.b("--%s--" % boundary), b""])
data = newline.join(data_parts)
headers = {
"Content-Type": "multipart/form-data; boundary=%s" % boundary,
"Content-Length": str(len(data))}
return headers, data
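# Illustration (hypothetical, not part of the original file): pairing the helper above
# with request() to upload a file; the names, path, and creds dict are examples only.
#
#   headers, data = create_multipart_formdata(files=[("firmware", "example.dlp", "/tmp/example.dlp")])
#   rc, response = request(url, method="POST", headers=headers, data=data, **creds)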
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
"""Issue an HTTP request to a url, retrieving an optional JSON response."""
if headers is None:
headers = {"Content-Type": "application/json", "Accept": "application/json"}
headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
if not http_agent:
http_agent = "Ansible / %s" % ansible_version
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version,
category="Information", event="setup", autosupport="false"):
ems_log = zapi.NaElement('ems-autosupport-log')
# Host name invoking the API.
ems_log.add_new_child("computer-name", name)
# ID of event. A user defined event-id, range [0..2^32-2].
ems_log.add_new_child("event-id", id)
# Name of the application invoking the API.
ems_log.add_new_child("event-source", source)
# Version of application invoking the API.
ems_log.add_new_child("app-version", version)
# Application defined category of the event.
ems_log.add_new_child("category", category)
# Description of event to log. An application defined message to log.
ems_log.add_new_child("event-description", event)
ems_log.add_new_child("log-level", "6")
ems_log.add_new_child("auto-support", autosupport)
server.invoke_successfully(ems_log, True)
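# Illustration (hypothetical, not part of the original file): modules typically emit
# an EMS autosupport log entry right after connecting:
#
#   server = setup_na_ontap_zapi(module)
#   ems_log_event("na_cdot_volume", server)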
def get_cserver_zapi(server):
vserver_info = zapi.NaElement('vserver-get-iter')
query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
query = zapi.NaElement('query')
query.add_child_elem(query_details)
vserver_info.add_child_elem(query)
result = server.invoke_successfully(vserver_info,
enable_tunneling=False)
attribute_list = result.get_child_by_name('attributes-list')
vserver_list = attribute_list.get_child_by_name('vserver-info')
return vserver_list.get_child_content('vserver-name')
def get_cserver(connection, is_rest=False):
if not is_rest:
return get_cserver_zapi(connection)
params = {'fields': 'type'}
api = "private/cli/vserver"
json, error = connection.get(api, params)
if json is None or error is not None:
# exit if there is an error or no data
return None
vservers = json.get('records')
if vservers is not None:
for vserver in vservers:
if vserver['type'] == 'admin': # cluster admin
return vserver['vserver']
if len(vservers) == 1: # assume vserver admin
return vservers[0]['vserver']
return None
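# Illustration (not part of the original file): get_cserver() dispatches on the
# connection type; pass is_rest=True together with an OntapRestAPI instance.
#
#   cserver = get_cserver(server)                   # ZAPI NaServer connection
#   cserver = get_cserver(rest_api, is_rest=True)   # OntapRestAPI connection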
class OntapRestAPI(object):
def __init__(self, module, timeout=60):
self.module = module
self.username = self.module.params['username']
self.password = self.module.params['password']
self.hostname = self.module.params['hostname']
self.use_rest = self.module.params['use_rest']
self.verify = self.module.params['validate_certs']
self.timeout = timeout
self.url = 'https://' + self.hostname + '/api/'
self.errors = list()
self.debug_logs = list()
self.check_required_library()
def check_required_library(self):
if not HAS_REQUESTS:
self.module.fail_json(msg=missing_required_lib('requests'))
def send_request(self, method, api, params, json=None, return_status_code=False):
''' send http request and process response, including error conditions '''
url = self.url + api
status_code = None
content = None
json_dict = None
json_error = None
error_details = None
def get_json(response):
''' extract json, and error message if present '''
try:
json = response.json()
except ValueError:
return None, None
error = json.get('error')
return json, error
try:
response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json)
content = response.content # for debug purposes
status_code = response.status_code
# If the response was successful, no Exception will be raised
response.raise_for_status()
json_dict, json_error = get_json(response)
except requests.exceptions.HTTPError as err:
__, json_error = get_json(response)
if json_error is None:
self.log_error(status_code, 'HTTP error: %s' % err)
error_details = str(err)
# If an error was reported in the json payload, it is handled below
except requests.exceptions.ConnectionError as err:
self.log_error(status_code, 'Connection error: %s' % err)
error_details = str(err)
except Exception as err:
self.log_error(status_code, 'Other error: %s' % err)
error_details = str(err)
if json_error is not None:
self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
error_details = json_error
self.log_debug(status_code, content)
if return_status_code:
return status_code, error_details
return json_dict, error_details
def get(self, api, params):
method = 'GET'
return self.send_request(method, api, params)
def post(self, api, data, params=None):
method = 'POST'
return self.send_request(method, api, params, json=data)
def patch(self, api, data, params=None):
method = 'PATCH'
return self.send_request(method, api, params, json=data)
def delete(self, api, data, params=None):
method = 'DELETE'
return self.send_request(method, api, params, json=data)
def _is_rest(self, used_unsupported_rest_properties=None):
if self.use_rest == "Always":
if used_unsupported_rest_properties:
error = "REST API currently does not support '%s'" % \
', '.join(used_unsupported_rest_properties)
return True, error
else:
return True, None
if self.use_rest == 'Never' or used_unsupported_rest_properties:
# force ZAPI if requested or if some parameter requires it
return False, None
method = 'HEAD'
api = 'cluster/software'
status_code, __ = self.send_request(method, api, params=None, return_status_code=True)
if status_code == 200:
return True, None
return False, None
def is_rest(self, used_unsupported_rest_properties=None):
''' only return error if there is a reason to '''
use_rest, error = self._is_rest(used_unsupported_rest_properties)
if used_unsupported_rest_properties is None:
return use_rest
return use_rest, error
def log_error(self, status_code, message):
self.errors.append(message)
self.debug_logs.append((status_code, message))
def log_debug(self, status_code, content):
self.debug_logs.append((status_code, content))
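# Illustration (hypothetical, not part of the original file): choosing REST vs ZAPI
# with the class above and issuing a GET; the endpoint is an example only.
#
#   rest_api = OntapRestAPI(module)
#   if rest_api.is_rest():
#       records, error = rest_api.get('storage/volumes', {'fields': 'name'})
#   else:
#       server = setup_na_ontap_zapi(module)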
class AwsCvsRestAPI(object):
def __init__(self, module, timeout=60):
self.module = module
self.api_key = self.module.params['api_key']
self.secret_key = self.module.params['secret_key']
self.api_url = self.module.params['api_url']
self.verify = self.module.params['validate_certs']
self.timeout = timeout
self.url = 'https://' + self.api_url + '/v1/'
self.check_required_library()
def check_required_library(self):
if not HAS_REQUESTS:
self.module.fail_json(msg=missing_required_lib('requests'))
def send_request(self, method, api, params, json=None):
''' send http request and process response, including error conditions '''
url = self.url + api
status_code = None
content = None
json_dict = None
json_error = None
error_details = None
headers = {
'Content-type': "application/json",
'api-key': self.api_key,
'secret-key': self.secret_key,
'Cache-Control': "no-cache",
}
def get_json(response):
''' extract json, and error message if present '''
try:
json = response.json()
except ValueError:
return None, None
success_code = [200, 201, 202]
if response.status_code not in success_code:
error = json.get('message')
else:
error = None
return json, error
try:
response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
status_code = response.status_code
# If the response was successful, no Exception will be raised
json_dict, json_error = get_json(response)
except requests.exceptions.HTTPError as err:
__, json_error = get_json(response)
if json_error is None:
error_details = str(err)
except requests.exceptions.ConnectionError as err:
error_details = str(err)
except Exception as err:
error_details = str(err)
if json_error is not None:
error_details = json_error
return json_dict, error_details
def get(self, api, params=None):
method = 'GET'
return self.send_request(method, api, params)
def post(self, api, data, params=None):
method = 'POST'
return self.send_request(method, api, params, json=data)
def patch(self, api, data, params=None):
method = 'PATCH'
return self.send_request(method, api, params, json=data)
def put(self, api, data, params=None):
method = 'PUT'
return self.send_request(method, api, params, json=data)
def delete(self, api, data, params=None):
method = 'DELETE'
return self.send_request(method, api, params, json=data)
def get_state(self, jobId):
""" Poll the job resource until the job reaches the 'done' state """
response, error = self.get('Jobs/%s' % jobId)
while str(response['state']) != 'done':
response, error = self.get('Jobs/%s' % jobId)
return 'done'
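# Illustration (hypothetical, not part of the original file): polling a long-running
# AWS CVS job with get_state(); the endpoint and payload are examples only.
#
#   rest_api = AwsCvsRestAPI(module)
#   response, error = rest_api.post('FileSystems', payload)
#   if error is None and response.get('jobs'):
#       rest_api.get_state(response['jobs'][0]['jobId'])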

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_alerts.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_amg.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_amg_role.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_amg_sync.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_asup.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_auditlog.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_auth.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_drive_firmware.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_facts.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_firmware.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_flashcache.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_global.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_host.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_hostgroup.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_iscsi_interface.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_iscsi_target.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_ldap.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_lun_mapping.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_mgmt_interface.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_snapshot_group.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_snapshot_images.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_snapshot_volume.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_storage_system.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_storagepool.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_syslog.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_volume.py

@@ -1 +0,0 @@
-./storage/netapp/netapp_e_volume_copy.py

@@ -18,7 +18,7 @@ module: na_cdot_aggregate
short_description: Manage NetApp cDOT aggregates.
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -79,7 +79,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -18,7 +18,7 @@ module: na_cdot_license
short_description: Manage NetApp cDOT protocol and feature licenses
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -133,7 +133,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -18,7 +18,7 @@ module: na_cdot_lun
short_description: Manage NetApp cDOT luns
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -118,7 +118,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -18,7 +18,7 @@ module: na_cdot_qtree
short_description: Manage qtrees
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -83,7 +83,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -18,7 +18,7 @@ module: na_cdot_svm
short_description: Manage NetApp cDOT svm
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -87,7 +87,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -18,7 +18,7 @@ module: na_cdot_user
short_description: useradmin configuration and management
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -108,7 +108,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -18,7 +18,7 @@ module: na_cdot_user_role
short_description: useradmin configuration and management
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -83,7 +83,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -18,7 +18,7 @@ module: na_cdot_volume
short_description: Manage NetApp cDOT volumes
extends_documentation_fragment:
-  - community.general.netapp.ontap
+  - community.general._netapp.ontap
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
@@ -132,7 +132,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()

@@ -19,7 +19,7 @@ deprecated:
alternative: Use M(na_ontap_info) instead.
author: Piotr Olczak (@dprts) <polczak@redhat.com>
extends_documentation_fragment:
-  - netapp.ontap.netapp.na_ontap
+  - community.general._netapp.na_ontap
short_description: NetApp information gatherer
description:
@@ -119,7 +119,7 @@ ontap_facts:
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
-import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
try:
import xmltodict

@@ -1,280 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_alerts
short_description: NetApp E-Series manage email notification settings
description:
- Certain E-Series systems have the capability to send email notifications on potentially critical events.
- This module will allow the owner of the system to specify email recipients for these messages.
author: Michael Price (@lmprice)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Enable/disable the sending of email-based alerts.
default: enabled
required: false
choices:
- enabled
- disabled
server:
description:
- A fully qualified domain name, IPv4 address, or IPv6 address of a mail server.
- To use a fully qualified domain name, you must configure a DNS server on both controllers using
M(netapp_e_mgmt_interface).
- Required when I(state=enabled).
required: no
sender:
description:
- This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account.
- Required when I(state=enabled).
required: no
contact:
description:
- Allows the owner to specify some free-form contact information to be included in the emails.
- This is typically utilized to provide a contact phone number.
required: no
recipients:
description:
- The email addresses that will receive the email notifications.
- Required when I(state=enabled).
required: no
test:
description:
- When a change is detected in the configuration, a test email will be sent.
- This may take a few minutes to process.
- Only applicable if I(state=enabled).
default: no
type: bool
log_path:
description:
- Path to a file on the Ansible control node to be used for debug logging
required: no
notes:
- Check mode is supported.
- Alertable messages are a subset of messages shown by the Major Event Log (MEL), of the storage-system. Examples
of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical
events.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
'''
EXAMPLES = """
- name: Enable email-based alerting
netapp_e_alerts:
state: enabled
sender: noreply@example.com
server: mail@example.com
contact: "Phone: 1-555-555-5555"
recipients:
- name1@example.com
- name2@example.com
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable alerting
netapp_e_alerts:
state: disabled
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
"""
import json
import logging
from pprint import pformat
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Alerts(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled',
choices=['enabled', 'disabled']),
server=dict(type='str', required=False, ),
sender=dict(type='str', required=False, ),
contact=dict(type='str', required=False, ),
recipients=dict(type='list', required=False, ),
test=dict(type='bool', required=False, default=False, ),
log_path=dict(type='str', required=False),
))
required_if = [
['state', 'enabled', ['server', 'sender', 'recipients']]
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
args = self.module.params
self.alerts = args['state'] == 'enabled'
self.server = args['server']
self.sender = args['sender']
self.contact = args['contact']
self.recipients = args['recipients']
self.test = args['test']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
# Very basic validation on email addresses: xx@yy.zz
email = re.compile(r"[^@]+@[^@]+\.[^@]+")
if self.sender and not email.match(self.sender):
self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender)
if self.recipients is not None:
for recipient in self.recipients:
if not email.match(recipient):
self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient)
if len(self.recipients) < 1:
self.module.fail_json(msg="At least one recipient address must be specified.")
def get_configuration(self):
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, headers=HEADERS,
**self.creds)
self._logger.info("Current config: %s", pformat(result))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
if self.alerts:
body = dict(alertingEnabled=True)
if not config['alertingEnabled']:
update = True
body.update(emailServerAddress=self.server)
if config['emailServerAddress'] != self.server:
update = True
body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
if self.contact and (self.contact != config['additionalContactInformation']
or not config['sendAdditionalContactInformation']):
update = True
body.update(emailSenderAddress=self.sender)
if config['emailSenderAddress'] != self.sender:
update = True
self.recipients.sort()
if config['recipientEmailAddresses']:
config['recipientEmailAddresses'].sort()
body.update(recipientEmailAddresses=self.recipients)
if config['recipientEmailAddresses'] != self.recipients:
update = True
elif config['alertingEnabled']:
body = dict(alertingEnabled=False)
update = True
self._logger.debug(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def send_test_email(self):
"""Send a test email to verify that the provided configuration is valid and functional."""
if not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/device-alerts/alert-email-test' % self.ssid,
timeout=300, method='POST', headers=HEADERS, **self.creds)
if result['response'] != 'emailSentOK':
self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]."
% (result['response'], self.ssid))
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update(self):
update = self.update_configuration()
if self.test and update:
self._logger.info("An update was detected and test=True, running a test.")
self.send_test_email()
if self.alerts:
msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
else:
msg = 'Alerting has been disabled.'
self.module.exit_json(msg=msg, changed=update, )
def __call__(self, *args, **kwargs):
self.update()
def main():
alerts = Alerts()
alerts()
if __name__ == '__main__':
main()

@@ -1,255 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_amg
short_description: NetApp E-Series create, remove, and update asynchronous mirror groups
description:
- Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
name:
description:
- The name of the async array you wish to target, or create.
- If C(state) is present and the name isn't found, it will attempt to create.
required: yes
secondaryArrayId:
description:
- The ID of the secondary array to be used in the mirroring process
required: yes
syncIntervalMinutes:
description:
- The synchronization interval in minutes
default: 10
manualSync:
description:
- Setting this to true will cause other synchronization values to be ignored
type: bool
default: 'no'
recoveryWarnThresholdMinutes:
description:
- Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value
default: 20
repoUtilizationWarnThreshold:
description:
- Recovery point warning threshold
default: 80
interfaceType:
description:
- The intended protocol to use if both Fibre and iSCSI are available.
choices:
- iscsi
- fibre
syncWarnThresholdMinutes:
description:
- The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
default: 10
state:
description:
- A C(state) of present will either create or update the async mirror group.
- A C(state) of absent will remove the async mirror group.
choices: [ absent, present ]
required: yes
'''
EXAMPLES = """
- name: AMG removal
netapp_e_amg:
state: absent
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
- name: AMG create
netapp_e_amg:
state: present
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
"""
RETURN = """
msg:
description: Successful creation
returned: success
type: str
sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
""" # NOQA
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def has_match(module, ssid, api_url, api_pwd, api_usr, body):
compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
desired_state = dict((x, (body.get(x))) for x in compare_keys)
label_exists = False
matches_spec = False
current_state = None
async_id = None
api_data = None
desired_name = body.get('name')
endpoint = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + endpoint
try:
rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.exit_json(msg="Error finding a match. Message: %s" % to_native(e), exception=traceback.format_exc())
for async_group in data:
if async_group['label'] == desired_name:
label_exists = True
api_data = async_group
async_id = async_group['groupRef']
current_state = dict(
syncIntervalMinutes=async_group['syncIntervalMinutes'],
syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
)
if current_state == desired_state:
matches_spec = True
return label_exists, matches_spec, api_data, async_id
def create_async(module, ssid, api_url, api_pwd, api_usr, body):
endpoint = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + endpoint
post_data = json.dumps(body)
try:
rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception as e:
module.exit_json(msg="Exception while creating aysnc mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return data
def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
url = api_url + endpoint
compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
desired_state = dict((x, (body.get(x))) for x in compare_keys)
if new_name:
desired_state['new_name'] = new_name
post_data = json.dumps(desired_state)
try:
rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
url_username=user, url_password=pwd)
except Exception as e:
module.exit_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return data
def remove_amg(module, ssid, api_url, pwd, user, async_id):
endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
url = api_url + endpoint
try:
rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
headers=HEADERS)
except Exception as e:
module.exit_json(msg="Exception while removing async mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return
def main():
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
new_name=dict(required=False, type='str'),
secondaryArrayId=dict(required=True, type='str'),
syncIntervalMinutes=dict(required=False, default=10, type='int'),
manualSync=dict(required=False, default=False, type='bool'),
recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
state=dict(required=True, choices=['present', 'absent']),
syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
new_name = p.pop('new_name')
state = p.pop('state')
if not api_url.endswith('/'):
api_url += '/'
name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)
if state == 'present':
if name_exists and spec_matches:
module.exit_json(changed=False, msg="Desired state met", **api_data)
elif name_exists and not spec_matches:
results = update_async(module, ssid, api_url, pwd, user,
p, new_name, async_id)
module.exit_json(changed=True,
msg="Async mirror group updated", async_id=async_id,
**results)
elif not name_exists:
results = create_async(module, ssid, api_url, pwd, user, p)
module.exit_json(changed=True, **results)
elif state == 'absent':
if name_exists:
remove_amg(module, ssid, api_url, pwd, user, async_id)
module.exit_json(changed=True, msg="Async mirror group removed.",
async_id=async_id)
else:
module.exit_json(changed=False,
msg="Async Mirror group: %s already absent" % p['name'])
if __name__ == '__main__':
main()

@@ -1,233 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_amg_role
short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
type: bool
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
type: bool
'''
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: str
sample: "No Async Mirror Group with the name."
"""
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
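# A minimal usage sketch of the helper above (hedged; the endpoint and
# credentials are illustrative placeholders, not values from this module):
#
# rc, data = request('https://proxy.example.com/devmgr/v2/storage-systems',
# url_username='admin', url_password='adminpass', headers=HEADERS)
#
# rc is the HTTP status code and data is the decoded JSON body (or None when
# the response carried no payload).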
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
amg_exists = False
has_desired_role = False
amg_id = None
amg_data = None
get_amgs = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + get_amgs
try:
amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception:
module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
for amg in amgs:
if amg['label'] == name:
amg_exists = True
amg_id = amg['id']
amg_data = amg
if amg['localRole'] == body.get('role'):
has_desired_role = True
return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
url = api_url + endpoint
post_data = json.dumps(body)
try:
request(url, data=post_data, method='POST', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
status_url = api_url + status_endpoint
try:
rc, status = request(status_url, method='GET', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
# Here we wait for the role reversal to complete
if 'roleChangeProgress' in status:
while status['roleChangeProgress'] != "none":
try:
rc, status = request(status_url, method='GET',
url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
return status
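# Note on the wait loop above: it polls the AMG status endpoint until
# 'roleChangeProgress' returns "none", i.e. until the role reversal has
# finished; there is no sleep between polls, so each iteration issues a new
# GET immediately.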
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
role=dict(required=True, choices=['primary', 'secondary']),
noSync=dict(required=False, type='bool', default=False),
force=dict(required=False, type='bool', default=False),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
name = p.pop('name')
if not api_url.endswith('/'):
api_url += '/'
amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
if not amg_exists:
module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
elif has_desired_role:
module.exit_json(changed=False, **amg_data)
else:
amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
if amg_data:
module.exit_json(changed=True, **amg_data)
else:
module.exit_json(changed=True, msg="AMG role changed.")
if __name__ == '__main__':
main()


@@ -1,260 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_amg_sync
short_description: NetApp E-Series conduct synchronization actions on asynchronous mirror groups.
description:
- Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- The ID of the storage array containing the AMG you wish to target
name:
description:
- The name of the async mirror group you wish to target
required: yes
state:
description:
- The synchronization action you'd like to take.
- If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in
progress, it will return with an OK status.
- If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended
choices:
- running
- suspended
required: yes
delete_recovery_point:
description:
- Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization.
- If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last
failures point will be deleted and synchronization will continue.
- If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary
and the failures point will be preserved.
- "NOTE: This only has impact for newly launched syncs."
type: bool
default: no
'''
EXAMPLES = """
- name: start AMG async
netapp_e_amg_sync:
name: "{{ amg_sync_name }}"
state: running
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
"""
RETURN = """
json:
description: The object attributes of the AMG.
returned: success
type: str
example:
{
"changed": false,
"connectionType": "fc",
"groupRef": "3700000060080E5000299C24000006EF57ACAC70",
"groupState": "optimal",
"id": "3700000060080E5000299C24000006EF57ACAC70",
"label": "made_with_ansible",
"localRole": "primary",
"mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
"orphanGroup": false,
"recoveryPointAgeAlertThresholdMinutes": 20,
"remoteRole": "secondary",
"remoteTarget": {
"nodeName": {
"ioInterfaceType": "fc",
"iscsiNodeName": null,
"remoteNodeWWN": "20040080E5299F1C"
},
"remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
"scsiinitiatorTargetBaseProperties": {
"ioInterfaceType": "fc",
"iscsiinitiatorTargetBaseParameters": null
}
},
"remoteTargetId": "ansible2",
"remoteTargetName": "Ansible2",
"remoteTargetWwn": "60080E5000299F880000000056A25D56",
"repositoryUtilizationWarnThreshold": 80,
"roleChangeProgress": "none",
"syncActivity": "idle",
"syncCompletionTimeAlertThresholdMinutes": 10,
"syncIntervalMinutes": 10,
"worldWideName": "60080E5000299C24000006EF57ACAC70"
}
"""
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
class AMGsync(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
name=dict(required=True, type='str'),
ssid=dict(required=True, type='str'),
state=dict(required=True, type='str', choices=['running', 'suspended']),
delete_recovery_point=dict(required=False, type='bool', default=False)
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.name = args['name']
self.ssid = args['ssid']
self.state = args['state']
self.delete_recovery_point = args['delete_recovery_point']
try:
self.user = args['api_username']
self.pwd = args['api_password']
self.url = args['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username"
"and api_password and api_url to the module.")
self.certs = args['validate_certs']
self.post_headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
self.amg_id, self.amg_obj = self.get_amg()
def get_amg(self):
endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
(rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
headers=self.post_headers)
try:
# filter() returns an iterator on Python 3, so materialize the matches first
matches = [d for d in amg_objs if d['label'] == self.name]
amg_id = matches[0]['id']
amg_obj = matches[0]
except IndexError:
self.module.fail_json(
msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
return amg_id, amg_obj
@property
def current_state(self):
amg_id, amg_obj = self.get_amg()
return amg_obj['syncActivity']
def run_sync_action(self):
# If we get to this point we know that the states differ, and there is no 'err' state,
# so no need to revalidate
post_body = dict()
if self.state == 'running':
if self.current_state == 'idle':
if self.delete_recovery_point:
post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
suffix = 'sync'
else:
# In a suspended state
suffix = 'resume'
else:
suffix = 'suspend'
endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)
(rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
ignore_errors=True)
if not str(rc).startswith('2'):
self.module.fail_json(msg=str(resp['errorMessage']))
return resp
def apply(self):
state_map = dict(
running=['active'],
suspended=['userSuspended', 'internallySuspended', 'paused'],
err=['unkown', '_UNDEFINED'])
if self.current_state not in state_map[self.state]:
if self.current_state in state_map['err']:
self.module.fail_json(
msg="The sync is a state of '%s', this requires manual intervention. " +
"Please investigate and try again" % self.current_state)
else:
self.amg_obj = self.run_sync_action()
(ret, amg) = self.get_amg()
self.module.exit_json(changed=False, **amg)
def main():
sync = AMGsync()
sync.apply()
if __name__ == '__main__':
main()


@@ -1,309 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_asup
short_description: NetApp E-Series manage auto-support settings
description:
- Allow the auto-support settings to be configured for an individual E-Series storage-system
author: Michael Price (@lmprice)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Enable/disable the E-Series auto-support configuration.
- When this option is enabled, configuration, logs, and other support-related information will be relayed
to NetApp to help better support your system. No personally identifiable information, passwords, etc., will
be collected.
default: enabled
choices:
- enabled
- disabled
aliases:
- asup
- auto_support
- autosupport
active:
description:
- Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
possible that the bundle did not contain all of the required information at the time of the event.
Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
of support data in order to resolve the problem.
- Only applicable if I(state=enabled).
default: yes
type: bool
start:
description:
- A start hour may be specified in a range from 0 to 23 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- start_time
default: 0
end:
description:
- An end hour may be specified in a range from 1 to 24 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- end_time
default: 24
days:
description:
- A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
of the provided days.
choices:
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- sunday
required: no
aliases:
- days_of_week
- schedule_days
verbose:
description:
- Provide the full ASUP configuration in the return.
default: no
required: no
type: bool
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
disabled if desired.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
'''
EXAMPLES = """
- name: Enable ASUP and allow pro-active retrieval of bundles
netapp_e_asup:
state: enabled
active: yes
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
netapp_e_asup:
start: 17
end: 20
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
asup:
description:
- True if ASUP is enabled.
returned: on success
sample: True
type: bool
active:
description:
- True if the active option has been enabled.
returned: on success
sample: True
type: bool
cfg:
description:
- Provide the full ASUP configuration.
returned: on success when I(verbose=true).
type: complex
contains:
asupEnabled:
description:
- True if ASUP has been enabled.
type: bool
onDemandEnabled:
description:
- True if ASUP active monitoring has been enabled.
type: bool
daysOfWeek:
description:
- The days of the week that ASUP bundles will be sent.
type: list
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Asup(object):
DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
choices=['enabled', 'disabled']),
active=dict(type='bool', required=False, default=True, ),
days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
choices=self.DAYS_OPTIONS),
start=dict(type='int', required=False, default=0, aliases=['start_time']),
end=dict(type='int', required=False, default=24, aliases=['end_time']),
verbose=dict(type='bool', required=False, default=False),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.asup = args['state'] == 'enabled'
self.active = args['active']
self.days = args['days']
self.start = args['start']
self.end = args['end']
self.verbose = args['verbose']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.start >= self.end:
self.module.fail_json(msg="The value provided for the start time is invalid."
" It must be less than the end time.")
if self.start < 0 or self.start > 23:
self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
else:
self.start = self.start * 60
if self.end < 1 or self.end > 24:
self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
else:
self.end = min(self.end * 60, 1439)
if not self.days:
self.days = self.DAYS_OPTIONS
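# Worked example of the hour-to-minute conversion above (hedged, illustrative
# values): with start=17 and end=20, the schedule window submitted to the API
# becomes dailyMinTime = 17 * 60 = 1020 and dailyMaxTime = min(20 * 60, 1439)
# = 1200, i.e. hours are converted to minutes past midnight (UTC) and the end
# of day is capped at 1439.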
def get_configuration(self):
try:
(rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)
if not (result['asupCapable'] and result['onDemandCapable']):
self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
if self.asup:
body = dict(asupEnabled=True)
if not config['asupEnabled']:
update = True
if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
update = True
body.update(dict(onDemandEnabled=self.active,
remoteDiagsEnabled=self.active))
self.days.sort()
config['schedule']['daysOfWeek'].sort()
body['schedule'] = dict(daysOfWeek=self.days,
dailyMinTime=self.start,
dailyMaxTime=self.end,
weeklyMinTime=self.start,
weeklyMaxTime=self.end)
if self.days != config['schedule']['daysOfWeek']:
update = True
if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
update = True
elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
update = True
elif config['asupEnabled']:
body = dict(asupEnabled=False)
update = True
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'device-asup', method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_configuration()
cfg = self.get_configuration()
if self.verbose:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
else:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Asup()
settings()
if __name__ == '__main__':
main()


@@ -1,281 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_auditlog
short_description: NetApp E-Series manage audit-log configuration
description:
- This module allows an E-Series storage system owner to set audit-log configuration parameters.
author: Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
max_records:
description:
- The maximum number of log messages the audit log will retain.
- Max records must be between 100 and 50000, inclusive.
default: 50000
log_level:
description: Filters the log messages according to the specified log level selection.
choices:
- all
- writeOnly
default: writeOnly
full_policy:
description: Specifies what audit-log should do once the number of entries approach the record limit.
choices:
- overWrite
- preventSystemAccess
default: overWrite
threshold:
description:
- This is the memory-full percent threshold at which the audit log will start issuing warning messages.
- The percent threshold must be between 60 and 90, inclusive.
default: 90
force:
description:
- Forces the audit-log configuration to delete log history when log message fullness causes an immediate
warning or full condition.
- Warning! This will cause any existing audit-log messages to be deleted.
- This is only applicable for I(full_policy=preventSystemAccess).
type: bool
default: no
log_path:
description: A local path to a file to be used for debug logging.
required: no
notes:
- Check mode is supported.
- This module is currently only supported with the Embedded Web Services API v3.0 and higher.
'''
EXAMPLES = """
- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity.
netapp_e_auditlog:
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
max_records: 50000
log_level: all
full_policy: preventSystemAccess
threshold: 60
log_path: /path/to/log_file.log
- name: Define audit-log utilize the default values.
netapp_e_auditlog:
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
- name: Force audit-log configuration when full or warning conditions occur while enacting preventSystemAccess policy.
netapp_e_auditlog:
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
max_records: 5000
log_level: all
full_policy: preventSystemAccess
threshold: 60
force: yes
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
try:
from urlparse import urlparse, urlunparse
except Exception:
from urllib.parse import urlparse, urlunparse
class AuditLog(object):
"""Audit-log module configuration class."""
MAX_RECORDS = 50000
HEADERS = {"Content-Type": "application/json",
"Accept": "application/json"}
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
max_records=dict(type="int", default=50000),
log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]),
full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]),
threshold=dict(type="int", default=90),
force=dict(type="bool", default=False),
log_path=dict(type='str', required=False)))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
args = self.module.params
self.max_records = args["max_records"]
if self.max_records < 100 or self.max_records > self.MAX_RECORDS:
self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]"
% self.max_records)
self.threshold = args["threshold"]
if self.threshold < 60 or self.threshold > 90:
self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold)
self.log_level = args["log_level"]
self.full_policy = args["full_policy"]
self.force = args["force"]
self.ssid = args['ssid']
self.url = args['api_url']
if not self.url.endswith('/'):
self.url += '/'
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
# logging setup
log_path = args['log_path']
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
self.proxy_used = self.is_proxy()
self._logger.info(self.proxy_used)
self.check_mode = self.module.check_mode
def is_proxy(self):
"""Determine whether the API is embedded or proxy."""
try:
# replace http url path with devmgr/utils/about
about_url = list(urlparse(self.url))
about_url[2] = "devmgr/utils/about"
about_url = urlunparse(about_url)
rc, data = request(about_url, timeout=300, headers=self.HEADERS, **self.creds)
return data["runningAsProxy"]
except Exception as err:
self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
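# Illustration (hedged): urlparse()/urlunparse() swap only the path component,
# so an api_url such as 'https://host:8443/devmgr/v2/' is queried as
# 'https://host:8443/devmgr/utils/about'; the 'runningAsProxy' flag in that
# response distinguishes a Web Services Proxy from the embedded API.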
def get_configuration(self):
"""Retrieve the existing audit-log configurations.
:returns: dictionary containing current audit-log configuration
"""
try:
if self.proxy_used:
rc, data = request(self.url + "audit-log/config", timeout=300, headers=self.HEADERS, **self.creds)
else:
rc, data = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid,
timeout=300, headers=self.HEADERS, **self.creds)
return data
except Exception as err:
self.module.fail_json(msg="Failed to retrieve the audit-log configuration! "
"Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def build_configuration(self):
"""Build audit-log expected configuration.
:returns: Tuple containing update boolean value and dictionary of audit-log configuration
"""
config = self.get_configuration()
current = dict(auditLogMaxRecords=config["auditLogMaxRecords"],
auditLogLevel=config["auditLogLevel"],
auditLogFullPolicy=config["auditLogFullPolicy"],
auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"])
body = dict(auditLogMaxRecords=self.max_records,
auditLogLevel=self.log_level,
auditLogFullPolicy=self.full_policy,
auditLogWarningThresholdPct=self.threshold)
update = current != body
self._logger.info(pformat(update))
self._logger.info(pformat(body))
return update, body
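# Note on idempotency: 'update' above is a plain dict inequality between the
# current configuration and the requested body, so update_configuration()
# skips its POST entirely when nothing would change.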
def delete_log_messages(self):
"""Delete all audit-log messages."""
self._logger.info("Deleting audit-log messages...")
try:
if self.proxy_used:
rc, result = request(self.url + "audit-log?clearAll=True", timeout=300,
method="DELETE", headers=self.HEADERS, **self.creds)
else:
rc, result = request(self.url + "storage-systems/%s/audit-log?clearAll=True" % self.ssid, timeout=300,
method="DELETE", headers=self.HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self, update=None, body=None, attempt_recovery=True):
"""Update audit-log configuration."""
if update is None or body is None:
update, body = self.build_configuration()
if update and not self.check_mode:
try:
if self.proxy_used:
rc, result = request(self.url + "storage-systems/audit-log/config", timeout=300,
data=json.dumps(body), method='POST', headers=self.HEADERS,
ignore_errors=True, **self.creds)
else:
rc, result = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, timeout=300,
data=json.dumps(body), method='POST', headers=self.HEADERS,
ignore_errors=True, **self.creds)
if rc == 422:
if self.force and attempt_recovery:
self.delete_log_messages()
update = self.update_configuration(update, body, False)
else:
self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(rc, result)))
except Exception as error:
self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(error)))
return update
def update(self):
"""Update the audit-log configuration."""
update = self.update_configuration()
self.module.exit_json(msg="Audit-log update complete", changed=update)
def __call__(self):
self.update()
def main():
auditlog = AuditLog()
auditlog()
if __name__ == "__main__":
main()


@@ -1,275 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_auth
short_description: NetApp E-Series set or update the password for a storage array.
description:
- Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
Services proxy. Note that not all storage arrays have a Monitor or RO role.
author: Kevin Hulquest (@hulquest)
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
name:
description:
- The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
the ID instead.
required: False
ssid:
description:
- The identifier of the storage array in the Web Services Proxy.
required: False
set_admin:
description:
- Boolean value on whether to update the admin password. If set to false then the RO account is updated.
type: bool
default: False
current_password:
description:
- The current admin password. This is not required if the password hasn't been set before.
required: False
new_password:
description:
- The password you would like to set. Cannot be more than 30 characters.
required: True
api_url:
description:
- The full API url.
- "Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API
- This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API
- This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
- name: Test module
netapp_e_auth:
name: trex
current_password: OldPasswd
new_password: NewPasswd
set_admin: yes
api_url: '{{ netapp_api_url }}'
api_username: '{{ netapp_api_username }}'
api_password: '{{ netapp_api_password }}'
'''
RETURN = '''
msg:
description: Success message
returned: success
type: str
sample: "Password Updated Successfully"
'''
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
"x-netapp-password-validate-method": "none"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def get_ssid(module, name, api_url, user, pwd):
count = 0
all_systems = 'storage-systems'
systems_url = api_url + all_systems
rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
for system in data:
if system['name'] == name:
count += 1
if count > 1:
module.fail_json(
msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
"Use the id instead")
else:
ssid = system['id']
else:
continue
if count == 0:
module.fail_json(msg="No storage array with the name %s was found" % name)
else:
return ssid
def get_pwd_status(module, ssid, api_url, user, pwd):
pwd_status = "storage-systems/%s/passwords" % ssid
url = api_url + pwd_status
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
return data['readOnlyPasswordSet'], data['adminPasswordSet']
except HTTPError as e:
module.fail_json(msg="There was an issue with connecting, please check that your "
"endpoint is properly defined and your credentials are correct: %s" % to_native(e))
def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
"""Update the stored storage-system password"""
update_pwd = 'storage-systems/%s' % ssid
url = api_url + update_pwd
post_body = json.dumps(dict(storedPassword=pwd))
try:
rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
url_password=api_pwd, validate_certs=module.validate_certs)
return rc, data
except Exception as e:
module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, to_native(e)))
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
"""Set the storage-system password"""
set_pass = "storage-systems/%s/passwords" % ssid
url = api_url + set_pass
if not current_password:
current_password = ""
post_body = json.dumps(
dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
ignore_errors=True, validate_certs=module.validate_certs)
except Exception as e:
module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, to_native(e)),
exception=traceback.format_exc())
if rc == 422:
post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
except Exception:
# TODO(lorenp): Resolve ignored rc, data
module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
if int(rc) >= 300:
module.fail_json(msg="Failed to set system password. Id [%s] Code [%s]. Error [%s]" % (ssid, rc, data))
rc, update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
if int(rc) < 300:
return update_data
else:
module.fail_json(msg="%s:%s" % (rc, update_data))
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=False, type='str'),
ssid=dict(required=False, type='str'),
current_password=dict(required=False, no_log=True),
new_password=dict(required=True, no_log=True),
set_admin=dict(required=True, type='bool'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True)
)
)
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
required_one_of=[['name', 'ssid']])
name = module.params['name']
ssid = module.params['ssid']
current_password = module.params['current_password']
new_password = module.params['new_password']
set_admin = module.params['set_admin']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
module.validate_certs = module.params['validate_certs']
if not api_url.endswith('/'):
api_url += '/'
if name:
ssid = get_ssid(module, name, api_url, user, pwd)
ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
if admin_pwd and not current_password:
module.fail_json(
msg="Admin account has a password set. " +
"You must supply current_password in order to update the RO or Admin passwords")
if len(new_password) > 30:
module.fail_json(msg="Passwords must not be greater than 30 characters in length")
result = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
new_password=new_password, set_admin=set_admin)
module.exit_json(changed=True, msg="Password Updated Successfully",
password_set=result['passwordSet'],
password_status=result['passwordStatus'])
if __name__ == '__main__':
main()


@@ -1,215 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_drive_firmware
short_description: NetApp E-Series manage drive firmware
description:
- Ensure drive firmware version is activated on specified drive model.
author:
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
firmware:
description:
- A list of drive firmware file paths.
- NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
type: list
required: True
wait_for_completion:
description:
- This flag will cause the module to wait for any upgrade actions to complete.
type: bool
default: false
ignore_inaccessible_drives:
description:
- This flag will determine whether the drive firmware upgrade should fail if any affected drives are inaccessible.
type: bool
default: false
upgrade_drives_online:
description:
- This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
- When I(upgrade_drives_online=False), stop all I/O before running the task.
type: bool
default: true
'''
EXAMPLES = """
- name: Ensure correct firmware versions
netapp_e_drive_firmware:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
firmware: "path/to/drive_firmware"
wait_for_completion: true
ignore_inaccessible_drives: false
"""
RETURN = """
msg:
description: Whether any drive firmware was upgraded and whether it is in progress.
type: str
returned: always
sample:
{ changed: True, upgrade_in_process: True }
"""
import os
import re
from time import sleep
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata
from ansible.module_utils._text import to_native, to_text, to_bytes
class NetAppESeriesDriveFirmware(NetAppESeriesModule):
WAIT_TIMEOUT_SEC = 60 * 15
def __init__(self):
ansible_options = dict(
firmware=dict(type="list", required=True),
wait_for_completion=dict(type="bool", default=False),
ignore_inaccessible_drives=dict(type="bool", default=False),
upgrade_drives_online=dict(type="bool", default=True))
super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True)
args = self.module.params
self.firmware_list = args["firmware"]
self.wait_for_completion = args["wait_for_completion"]
self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
self.upgrade_drives_online = args["upgrade_drives_online"]
self.upgrade_list_cache = None
self.upgrade_required_cache = None
self.upgrade_in_progress = False
self.drive_info_cache = None
def upload_firmware(self):
"""Ensure firmware has been upload prior to uploaded."""
for firmware in self.firmware_list:
firmware_name = os.path.basename(firmware)
files = [("file", firmware_name, firmware)]
headers, data = create_multipart_formdata(files)
try:
rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
except Exception as error:
self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))
def upgrade_list(self):
"""Determine whether firmware is compatible with the specified drives."""
if self.upgrade_list_cache is None:
self.upgrade_list_cache = list()
try:
rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)
# Create the upgrade list; this ensures only the uploaded firmware is applied
for firmware in self.firmware_list:
filename = os.path.basename(firmware)
for uploaded_firmware in response["compatibilities"]:
if uploaded_firmware["filename"] == filename:
# Determine whether upgrade is required
drive_reference_list = []
for drive in uploaded_firmware["compatibleDrives"]:
try:
rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))
# Add drive references that are supported and differ from current firmware
if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):
if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]):
drive_reference_list.append(drive["driveRef"])
if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
% (self.ssid, drive["driveRef"]))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
% (self.ssid, drive["driveRef"], to_native(error)))
if drive_reference_list:
self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])
except Exception as error:
self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
return self.upgrade_list_cache
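# Shape of the cached upgrade list (hedged, illustrative values):
# [{"filename": "drive_firmware.dlp",
# "driveRefList": ["01000000500003960C8B67880000000000000000"]}]
# Only firmware files uploaded by this task, and drives whose current firmware
# differs from the uploaded version, are included.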
def wait_for_upgrade_completion(self):
"""Wait for drive firmware upgrade to complete."""
drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
last_status = None
for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
try:
rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)
# Check drive status
for status in response["driveStatus"]:
last_status = status
if status["driveRef"] in drive_references:
if status["status"] == "okay":
continue
elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
break
else:
self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
% (self.ssid, status["driveRef"], status["status"]))
else:
self.upgrade_in_progress = False
break
except Exception as error:
self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
sleep(5)
else:
self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))
def upgrade(self):
"""Apply firmware to applicable drives."""
try:
rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
% (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
self.upgrade_in_progress = True
except Exception as error:
self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
if self.wait_for_completion:
self.wait_for_upgrade_completion()
def apply(self):
"""Apply firmware policy has been enforced on E-Series storage system."""
self.upload_firmware()
if self.upgrade_list() and not self.module.check_mode:
self.upgrade()
self.module.exit_json(changed=bool(self.upgrade_list()),
upgrade_in_process=self.upgrade_in_progress)
def main():
drive_firmware = NetAppESeriesDriveFirmware()
drive_firmware.apply()
if __name__ == '__main__':
main()


@@ -1,530 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_facts
short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
description:
- The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
'''
EXAMPLES = """
---
- name: Get array facts
netapp_e_facts:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample:
- Gathered facts for storage array. Array ID [1].
- Gathered facts for web services proxy.
storage_array_facts:
description: provides details about the array, controllers, management interfaces, hostside interfaces,
driveside interfaces, disks, storage pools, volumes, snapshots, and features.
returned: on successful inquiry from the embedded web services rest api
type: complex
contains:
netapp_controllers:
description: storage array controller list that contains basic controller identification and status
type: complex
sample:
- [{"name": "A", "serial": "021632007299", "status": "optimal"},
{"name": "B", "serial": "021632007300", "status": "failed"}]
netapp_disks:
description: drive list that contains identification, type, and status information for each drive
type: complex
sample:
- [{"available": false,
"firmware_version": "MS02",
"id": "01000000500003960C8B67880000000000000000",
"media_type": "ssd",
"product_id": "PX02SMU080 ",
"serial_number": "15R0A08LT2BA",
"status": "optimal",
"tray_ref": "0E00000000000000000000000000000000000000",
"usable_bytes": "799629205504" }]
netapp_driveside_interfaces:
description: drive side interface list that contains identification, type, and speed for each interface
type: complex
sample:
- [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
- [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
netapp_enabled_features:
description: specifies the enabled features on the storage array.
returned: on success
type: complex
sample:
- [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
netapp_host_groups:
description: specifies the host groups on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
netapp_hosts:
description: specifies the hosts on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "8203800000000000000000000000000000000000",
"name": "host1",
"group_id": "85000000600A098000A4B28D003610705C40B964",
"host_type_index": 28,
"ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
{ "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
netapp_host_types:
description: lists the available host types on the storage array.
returned: on success
type: complex
sample:
- [{ "index": 0, "type": "FactoryDefault" },
{ "index": 1, "type": "W2KNETNCL"},
{ "index": 2, "type": "SOL" },
{ "index": 5, "type": "AVT_4M" },
{ "index": 6, "type": "LNX" },
{ "index": 7, "type": "LnxALUA" },
{ "index": 8, "type": "W2KNETCL" },
{ "index": 9, "type": "AIX MPIO" },
{ "index": 10, "type": "VmwTPGSALUA" },
{ "index": 15, "type": "HPXTPGS" },
{ "index": 17, "type": "SolTPGSALUA" },
{ "index": 18, "type": "SVC" },
{ "index": 22, "type": "MacTPGSALUA" },
{ "index": 23, "type": "WinTPGSALUA" },
{ "index": 24, "type": "LnxTPGSALUA" },
{ "index": 25, "type": "LnxTPGSALUA_PM" },
{ "index": 26, "type": "ONTAP_ALUA" },
{ "index": 27, "type": "LnxTPGSALUA_SF" },
{ "index": 28, "type": "LnxDHALUA" },
{ "index": 29, "type": "ATTOClusterAllOS" }]
netapp_hostside_interfaces:
description: host side interface list that contains identification, configuration, type, speed, and
status information for each interface
type: complex
sample:
- [{"iscsi":
[{ "controller": "A",
"current_interface_speed": "10g",
"ipv4_address": "10.10.10.1",
"ipv4_enabled": true,
"ipv4_gateway": "10.10.10.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
"link_status": "up",
"mtu": 9000,
"supported_interface_speeds": [ "10g" ] }]}]
netapp_management_interfaces:
description: management interface list that contains identification, configuration, and status for
each interface
type: complex
sample:
- [{"alias": "ict-2800-A",
"channel": 1,
"controller": "A",
"dns_config_method": "dhcp",
"dns_servers": [],
"ipv4_address": "10.1.1.1",
"ipv4_address_config_method": "static",
"ipv4_enabled": true,
"ipv4_gateway": "10.113.1.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"link_status": "up",
"mac_address": "00A098A81B5D",
"name": "wan0",
"ntp_config_method": "disabled",
"ntp_servers": [],
"remote_ssh_access": false }]
netapp_storage_array:
description: provides storage array identification, firmware version, and available capabilities
type: dict
sample:
- {"chassis_serial": "021540006043",
"firmware": "08.40.00.01",
"name": "ict-2800-11_40",
"wwn": "600A098000A81B5D0000000059D60C76",
"cacheBlockSizes": [4096,
8192,
16384,
32768],
"supportedSegSizes": [8192,
16384,
32768,
65536,
131072,
262144,
524288]}
netapp_storage_pools:
description: storage pool list that contains identification and capacity information for each pool
type: complex
sample:
- [{"available_capacity": "3490353782784",
"id": "04000000600A098000A81B5D000002B45A953A61",
"name": "Raid6",
"total_capacity": "5399466745856",
"used_capacity": "1909112963072" }]
netapp_volumes:
description: storage volume list that contains identification and capacity information for each volume
type: complex
sample:
- [{"capacity": "5368709120",
"id": "02000000600A098000AAC0C3000002C45A952BAA",
"is_thin_provisioned": false,
"name": "5G",
"parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
netapp_workload_tags:
description: workload tag list
type: complex
sample:
- [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
"name": "ftp_server",
"workloadAttributes": [{"key": "use",
"value": "general"}]}]
netapp_volumes_by_initiators:
description: list of available volumes keyed by the mapped initiators.
type: complex
sample:
- {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
"meta_data": {"filetype": "xfs", "public": true},
"name": "some_volume",
"workload_name": "test2_volumes",
"wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
snapshot_images:
description: snapshot image list that contains identification, capacity, and status information for each
snapshot image
type: complex
sample:
- [{"active_cow": true,
"creation_method": "user",
"id": "34000000600A098000A81B5D00630A965B0535AC",
"pit_capacity": "5368709120",
"reposity_cap_utilization": "0",
"rollback_source": false,
"status": "optimal" }]
"""
from re import match
from pprint import pformat
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
class Facts(NetAppESeriesModule):
def __init__(self):
web_services_version = "02.00.0000.0000"
super(Facts, self).__init__(ansible_options={},
web_services_version=web_services_version,
supports_check_mode=True)
def get_controllers(self):
"""Retrieve a mapping of controller references to their labels."""
controllers = list()
try:
rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
% (self.ssid, str(err)))
controllers.sort()
controllers_dict = {}
i = ord('A')
for controller in controllers:
label = chr(i)
controllers_dict[controller] = label
i += 1
return controllers_dict
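# Example (hedged, illustrative references): two sorted controller ids map to
# {'070000000000000000000001': 'A', '070000000000000000000002': 'B'}, letting
# later facts report controllers by label rather than by reference.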
def get_array_facts(self):
"""Extract particular facts from the storage array graph"""
facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid)
controller_reference_label = self.get_controllers()
array_facts = None
# Get the storage array graph
try:
rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
facts['netapp_storage_array'] = dict(
name=array_facts['sa']['saData']['storageArrayLabel'],
chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
firmware=array_facts['sa']['saData']['fwVersion'],
wwn=array_facts['sa']['saData']['saId']['worldWideName'],
segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])
facts['netapp_controllers'] = [
dict(
name=controller_reference_label[controller['controllerRef']],
serial=controller['serialNumber'].strip(),
status=controller['status'],
) for controller in array_facts['controller']]
facts['netapp_host_groups'] = [
dict(
id=group['id'],
name=group['name']
) for group in array_facts['storagePoolBundle']['cluster']]
facts['netapp_hosts'] = [
dict(
group_id=host['clusterRef'],
hosts_reference=host['hostRef'],
id=host['id'],
name=host['name'],
host_type_index=host['hostTypeIndex'],
ports=host['hostSidePorts']
) for host in array_facts['storagePoolBundle']['host']]
facts['netapp_host_types'] = [
dict(
type=host_type['hostType'],
index=host_type['index']
) for host_type in array_facts['sa']['hostSpecificVals']
if 'hostType' in host_type.keys() and host_type['hostType']
# This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
]
facts['snapshot_images'] = [
dict(
id=snapshot['id'],
status=snapshot['status'],
pit_capacity=snapshot['pitCapacity'],
creation_method=snapshot['creationMethod'],
reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
active_cow=snapshot['activeCOW'],
rollback_source=snapshot['isRollbackSource']
) for snapshot in array_facts['highLevelVolBundle']['pit']]
facts['netapp_disks'] = [
dict(
id=disk['id'],
available=disk['available'],
media_type=disk['driveMediaType'],
status=disk['status'],
usable_bytes=disk['usableCapacity'],
tray_ref=disk['physicalLocation']['trayRef'],
product_id=disk['productID'],
firmware_version=disk['firmwareVersion'],
serial_number=disk['serialNumber'].lstrip()
) for disk in array_facts['drive']]
facts['netapp_management_interfaces'] = [
dict(controller=controller_reference_label[controller['controllerRef']],
name=iface['ethernet']['interfaceName'],
alias=iface['ethernet']['alias'],
channel=iface['ethernet']['channel'],
mac_address=iface['ethernet']['macAddr'],
remote_ssh_access=iface['ethernet']['rloginEnabled'],
link_status=iface['ethernet']['linkStatus'],
ipv4_enabled=iface['ethernet']['ipv4Enabled'],
ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
ipv4_address=iface['ethernet']['ipv4Address'],
ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
ipv6_enabled=iface['ethernet']['ipv6Enabled'],
dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
) for controller in array_facts['controller'] for iface in controller['netInterfaces']]
facts['netapp_hostside_interfaces'] = [
dict(
fc=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['fibre']['channel'],
link_status=iface['fibre']['linkStatus'],
current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'fc'],
ib=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['ib']['channel'],
link_status=iface['ib']['linkState'],
mtu=iface['ib']['maximumTransmissionUnit'],
current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'ib'],
iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
iqn=iface['iscsi']['iqn'],
link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
ipv4_enabled=iface['iscsi']['ipv4Enabled'],
ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
ipv6_enabled=iface['iscsi']['ipv6Enabled'],
mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']['currentInterfaceSpeed']),
supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']
['supportedInterfaceSpeeds']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'iscsi'],
sas=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['sas']['channel'],
current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
link_status=iface['sas']['iocPort']['state'])
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'sas'])]
facts['netapp_driveside_interfaces'] = [
dict(
controller=controller_reference_label[controller['controllerRef']],
interface_type=interface['interfaceType'],
interface_speed=strip_interface_speed(
interface[interface['interfaceType']]['maximumInterfaceSpeed']
if (interface['interfaceType'] == 'sata' or
interface['interfaceType'] == 'sas' or
interface['interfaceType'] == 'fibre')
else (
interface[interface['interfaceType']]['currentSpeed']
if interface['interfaceType'] == 'ib'
else (
interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
if interface['interfaceType'] == 'iscsi' else 'unknown'
))),
)
for controller in array_facts['controller']
for interface in controller['driveInterfaces']]
facts['netapp_storage_pools'] = [
dict(
id=storage_pool['id'],
name=storage_pool['name'],
available_capacity=storage_pool['freeSpace'],
total_capacity=storage_pool['totalRaidedSpace'],
used_capacity=storage_pool['usedSpace']
) for storage_pool in array_facts['volumeGroup']]
all_volumes = list(array_facts['volume'])
facts['netapp_volumes'] = [
dict(
id=v['id'],
name=v['name'],
parent_storage_pool_id=v['volumeGroupRef'],
capacity=v['capacity'],
is_thin_provisioned=v['thinProvisioned'],
workload=v['metadata'],
) for v in all_volumes]
workload_tags = None
try:
rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)
facts['netapp_workload_tags'] = [
dict(
id=workload_tag['id'],
name=workload_tag['name'],
attributes=workload_tag['workloadAttributes']
) for workload_tag in workload_tags]
# Create a dictionary of volume lists keyed by host names
facts['netapp_volumes_by_initiators'] = dict()
for mapping in array_facts['storagePoolBundle']['lunMapping']:
for host in facts['netapp_hosts']:
if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
facts['netapp_volumes_by_initiators'].update({host['name']: []})
for volume in all_volumes:
if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
# Determine workload name if there is one
workload_name = ""
metadata = dict()
for volume_tag in volume['metadata']:
if volume_tag['key'] == 'workloadId':
for workload_tag in facts['netapp_workload_tags']:
if volume_tag['value'] == workload_tag['id']:
workload_name = workload_tag['name']
metadata = dict((entry['key'], entry['value'])
for entry in workload_tag['attributes']
if entry['key'] != 'profileId')
facts['netapp_volumes_by_initiators'][host['name']].append(
dict(name=volume['name'],
id=volume['id'],
wwn=volume['wwn'],
workload_name=workload_name,
meta_data=metadata))
features = [feature for feature in array_facts['sa']['capabilities']]
features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
if feature['isEnabled']])
features = list(set(features)) # ensure unique
features.sort()
facts['netapp_enabled_features'] = features
return facts
def get_facts(self):
"""Get the embedded or web services proxy information."""
facts = self.get_array_facts()
self.module.log("isEmbedded: %s" % self.is_embedded())
self.module.log(pformat(facts))
self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
storage_array_facts=facts)
def strip_interface_speed(speed):
"""Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
if isinstance(speed, list):
result = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
result = [sp.group().replace("speed", "") if result else "unknown" for sp in result if sp]
result = ["auto" if match(r"auto", sp) else sp for sp in result]
else:
result = match(r"speed[0-9]{1,3}[gm]", speed)
result = result.group().replace("speed", "") if result else "unknown"
result = "auto" if match(r"auto", result.lower()) else result
return result
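# Editor's illustration (not module code), assuming the regex behavior above:
#   strip_interface_speed("speed10gig")                 -> "10g"
#   strip_interface_speed(["speed2gig", "speed100meg"]) -> ["2g", "100m"]
#   strip_interface_speed("somethingElse")              -> "unknown"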
def main():
facts = Facts()
facts.get_facts()
if __name__ == "__main__":
main()
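For orientation, a sketch of how the returned facts might be consumed from Python once registered by a playbook (editor's illustration; the host, volume, and WWN values below are hypothetical, while the keys mirror the structures built above):

    facts = {"netapp_volumes_by_initiators": {
        "Host1": [{"name": "vol_1", "id": "02000000600A098000A4B28D",
                   "wwn": "600A098000A4B28D000017805C8BC2E4",
                   "workload_name": "", "meta_data": {}}]}}
    for host_name, volumes in facts["netapp_volumes_by_initiators"].items():
        for volume in volumes:
            print("%s -> %s (%s)" % (host_name, volume["name"], volume["wwn"]))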


@ -1,488 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_firmware
short_description: NetApp E-Series manage firmware.
description:
- Ensure specific firmware versions are activated on E-Series storage system.
author:
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
nvsram:
description:
- Path to the NVSRAM file.
type: str
required: true
firmware:
description:
- Path to the firmware file.
type: str
required: true
wait_for_completion:
description:
- This flag will cause module to wait for any upgrade actions to complete.
type: bool
default: false
ignore_health_check:
description:
- This flag will force firmware to be activated in spite of the health check.
- Use at your own risk. Certain non-optimal states could result in data loss.
type: bool
default: false
'''
EXAMPLES = """
- name: Ensure correct firmware versions
netapp_e_firmware:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
nvsram: "path/to/nvsram"
bundle: "path/to/bundle"
wait_for_completion: true
- name: Ensure correct firmware versions
netapp_e_firmware:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
nvsram: "path/to/nvsram"
firmware: "path/to/firmware"
"""
RETURN = """
msg:
description: Status and version of firmware and NVSRAM.
type: str
returned: always
sample:
"""
import os
from time import sleep
from ansible.module_utils import six
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata, request
from ansible.module_utils._text import to_native, to_text, to_bytes
class NetAppESeriesFirmware(NetAppESeriesModule):
HEALTH_CHECK_TIMEOUT_MS = 120000
REBOOT_TIMEOUT_SEC = 15 * 60
FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
DEFAULT_TIMEOUT = 60 * 15 # This will override the NetAppESeriesModule request method timeout.
def __init__(self):
ansible_options = dict(
nvsram=dict(type="str", required=True),
firmware=dict(type="str", required=True),
wait_for_completion=dict(type="bool", default=False),
ignore_health_check=dict(type="bool", default=False))
super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True)
args = self.module.params
self.nvsram = args["nvsram"]
self.firmware = args["firmware"]
self.wait_for_completion = args["wait_for_completion"]
self.ignore_health_check = args["ignore_health_check"]
self.nvsram_name = None
self.firmware_name = None
self.is_bundle_cache = None
self.firmware_version_cache = None
self.nvsram_version_cache = None
self.upgrade_required = False
self.upgrade_in_progress = False
self.module_info = dict()
self.nvsram_name = os.path.basename(self.nvsram)
self.firmware_name = os.path.basename(self.firmware)
def is_firmware_bundled(self):
"""Determine whether supplied firmware is bundle."""
if self.is_bundle_cache is None:
with open(self.firmware, "rb") as fh:
signature = fh.read(16).lower()
if b"firmware" in signature:
self.is_bundle_cache = False
elif b"combined_content" in signature:
self.is_bundle_cache = True
else:
self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
return self.is_bundle_cache
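# Editor's note (illustrative; the file name is hypothetical): the check above keys on
# the first 16 bytes of the file, which contain b"firmware" for a standalone firmware
# image and b"combined_content" for a bundle:
#   with open("firmware_bundle.dlp", "rb") as fh:
#       fh.read(16).lower()  # -> b"combined_content" for a bundled file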
def firmware_version(self):
"""Retrieve firmware version of the firmware file. Return: bytes string"""
if self.firmware_version_cache is None:
# Search firmware file for bundle or firmware version
with open(self.firmware, "rb") as fh:
line = fh.readline()
while line:
if self.is_firmware_bundled():
if b'displayableAttributeList=' in line:
for item in line[25:].split(b','):
key, value = item.split(b"|")
if key == b'VERSION':
self.firmware_version_cache = value.strip(b"\n")
break
elif b"Version:" in line:
self.firmware_version_cache = line.split()[-1].strip(b"\n")
break
line = fh.readline()
else:
self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
return self.firmware_version_cache
def nvsram_version(self):
"""Retrieve NVSRAM version of the NVSRAM file. Return: byte string"""
if self.nvsram_version_cache is None:
with open(self.nvsram, "rb") as fh:
line = fh.readline()
while line:
if b".NVSRAM Configuration Number" in line:
self.nvsram_version_cache = line.split(b'"')[-2]
break
line = fh.readline()
else:
self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
return self.nvsram_version_cache
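# Editor's note (illustrative; the version value is hypothetical): the scan above looks
# for a line of the form
#   .NVSRAM Configuration Number          "N280X-830834-D02"
# and returns the quoted token, here b"N280X-830834-D02".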
def check_system_health(self):
"""Ensure E-Series storage system is healthy. Works for both embedded and proxy web services."""
try:
rc, request_id = self.request("health-check", method="POST", data={"onlineOnly": True, "storageDeviceIds": [self.ssid]})
while True:
sleep(1)
try:
rc, response = self.request("health-check?requestId=%s" % request_id["requestId"])
if not response["healthCheckRunning"]:
return response["results"][0]["successful"]
elif int(response["results"][0]["processingTimeMS"]) > self.HEALTH_CHECK_TIMEOUT_MS:
self.module.fail_json(msg="Health check failed to complete. Array Id [%s]." % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
except Exception as error:
self.module.fail_json(msg="Failed to initiate health check. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % self.ssid)
def embedded_check_compatibility(self):
"""Verify files are compatible with E-Series storage system."""
self.embedded_check_nvsram_compatibility()
self.embedded_check_bundle_compatibility()
def embedded_check_nvsram_compatibility(self):
"""Verify the provided NVSRAM is compatible with E-Series storage system."""
# Check nvsram compatibility
try:
files = [("nvsramimage", self.nvsram_name, self.nvsram)]
headers, data = create_multipart_formdata(files=files)
rc, nvsram_compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid,
method="POST", data=data, headers=headers)
if not nvsram_compatible["signatureTestingPassed"]:
self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
if not nvsram_compatible["fileCompatible"]:
self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)
# Determine whether nvsram is required
for module in nvsram_compatible["versionContents"]:
if module["bundledVersion"] != module["onboardVersion"]:
self.upgrade_required = True
# Update bundle info
self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
except Exception as error:
self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
def embedded_check_bundle_compatibility(self):
"""Verify the provided firmware bundle is compatible with E-Series storage system."""
try:
files = [("files[]", "blob", self.firmware)]
headers, data = create_multipart_formdata(files=files, send_8kb=True)
rc, bundle_compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid,
method="POST", data=data, headers=headers)
# Determine whether valid and compatible firmware
if not bundle_compatible["signatureTestingPassed"]:
self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
if not bundle_compatible["fileCompatible"]:
self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)
# Determine whether upgrade is required
for module in bundle_compatible["versionContents"]:
bundle_module_version = module["bundledVersion"].split(".")
onboard_module_version = module["onboardVersion"].split(".")
version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))
if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
self.upgrade_required = True
# Check whether downgrade is being attempted
bundle_version = module["bundledVersion"].split(".")[:2]
onboard_version = module["onboardVersion"].split(".")[:2]
if bundle_version[0] < onboard_version[0] or (bundle_version[0] == onboard_version[0] and bundle_version[1] < onboard_version[1]):
self.module.fail_json(msg="Downgrades are not permitted. onboard [%s] > bundled[%s]."
% (module["onboardVersion"], module["bundledVersion"]))
# Update bundle info
self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
except Exception as error:
self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
def embedded_wait_for_upgrade(self):
"""Wait for SANtricity Web Services Embedded to be available after reboot."""
for count in range(0, self.REBOOT_TIMEOUT_SEC):
try:
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData" % self.ssid)
bundle_display = [m["versionString"] for m in response[0]["extendedSAData"]["codeVersions"] if m["codeModule"] == "bundleDisplay"][0]
if rc == 200 and six.b(bundle_display) == self.firmware_version() and six.b(response[0]["nvsramVersion"]) == self.nvsram_version():
self.upgrade_in_progress = False
break
except Exception as error:
pass
sleep(1)
else:
self.module.fail_json(msg="Timeout waiting for Santricity Web Services Embedded. Array [%s]" % self.ssid)
def embedded_upgrade(self):
"""Upload and activate both firmware and NVSRAM."""
files = [("nvsramfile", self.nvsram_name, self.nvsram),
("dlpfile", self.firmware_name, self.firmware)]
headers, data = create_multipart_formdata(files=files)
try:
rc, response = self.request("firmware/embedded-firmware?staged=false&nvsram=true", method="POST", data=data, headers=headers)
self.upgrade_in_progress = True
except Exception as error:
self.module.fail_json(msg="Failed to upload and activate firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
if self.wait_for_completion:
self.embedded_wait_for_upgrade()
def proxy_check_nvsram_compatibility(self):
"""Verify nvsram is compatible with E-Series storage system."""
data = {"storageDeviceIds": [self.ssid]}
try:
rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
sleep(5)
try:
rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
if not response["checkRunning"]:
for result in response["results"][0]["nvsramFiles"]:
if result["filename"] == self.nvsram_name:
return
self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
except Exception as error:
self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
def proxy_check_firmware_compatibility(self):
"""Verify firmware is compatible with E-Series storage system."""
data = {"storageDeviceIds": [self.ssid]}
try:
rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
sleep(5)
try:
rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
if not response["checkRunning"]:
for result in response["results"][0]["cfwFiles"]:
if result["filename"] == self.firmware_name:
return
self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." % (self.firmware_name, self.ssid))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
except Exception as error:
self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
def proxy_upload_and_check_compatibility(self):
"""Ensure firmware is uploaded and verify compatibility."""
try:
rc, cfw_files = self.request("firmware/cfw-files")
for file in cfw_files:
if file["filename"] == self.nvsram_name:
break
else:
fields = [("validate", "true")]
files = [("firmwareFile", self.nvsram_name, self.nvsram)]
headers, data = create_multipart_formdata(files=files, fields=fields)
try:
rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
except Exception as error:
self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]."
% (self.nvsram_name, self.ssid, to_native(error)))
self.proxy_check_nvsram_compatibility()
for file in cfw_files:
if file["filename"] == self.firmware_name:
break
else:
fields = [("validate", "true")]
files = [("firmwareFile", self.firmware_name, self.firmware)]
headers, data = create_multipart_formdata(files=files, fields=fields)
try:
rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
except Exception as error:
self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]."
% (self.firmware_name, self.ssid, to_native(error)))
self.proxy_check_firmware_compatibility()
except Exception as error:
self.module.fail_json(msg="Failed to retrieve existing existing firmware files. Error [%s]" % to_native(error))
def proxy_check_upgrade_required(self):
"""Staging is required to collect firmware information from the web services proxy."""
# Verify controller consistency and get firmware versions
try:
# Retrieve current bundle version
if self.is_firmware_bundled():
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid)
current_firmware_version = six.b(response[0]["versionString"])
else:
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
current_firmware_version = six.b(response[0])
# Determine whether upgrade is required
if current_firmware_version != self.firmware_version():
current = current_firmware_version.split(b".")[:2]
upgrade = self.firmware_version().split(b".")[:2]
if current[0] < upgrade[0] or (current[0] == upgrade[0] and current[1] <= upgrade[1]):
self.upgrade_required = True
else:
self.module.fail_json(msg="Downgrades are not permitted. Firmware [%s]. Array [%s]." % (self.firmware, self.ssid))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
# Determine current NVSRAM version and whether change is required
try:
rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
if six.b(response[0]) != self.nvsram_version():
self.upgrade_required = True
except Exception as error:
self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
def proxy_wait_for_upgrade(self, request_id):
"""Wait for SANtricity Web Services Proxy to report upgrade complete"""
if self.is_firmware_bundled():
while True:
try:
sleep(5)
rc, response = self.request("batch/cfw-upgrade/%s" % request_id)
if response["status"] == "complete":
self.upgrade_in_progress = False
break
elif response["status"] in ["failed", "cancelled"]:
self.module.fail_json(msg="Firmware upgrade failed to complete. Array [%s]." % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve firmware upgrade status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
else:
for count in range(0, int(self.REBOOT_TIMEOUT_SEC / 5)):
try:
sleep(5)
rc_firmware, firmware = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
rc_nvsram, nvsram = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
if six.b(firmware[0]) == self.firmware_version() and six.b(nvsram[0]) == self.nvsram_version():
self.upgrade_in_progress = False
break
except Exception as error:
pass
else:
self.module.fail_json(msg="Timed out waiting for firmware upgrade to complete. Array [%s]." % self.ssid)
def proxy_upgrade(self):
"""Activate previously uploaded firmware related files."""
request_id = None
if self.is_firmware_bundled():
data = {"activate": True,
"firmwareFile": self.firmware_name,
"nvsramFile": self.nvsram_name,
"systemInfos": [{"systemId": self.ssid,
"allowNonOptimalActivation": self.ignore_health_check}]}
try:
rc, response = self.request("batch/cfw-upgrade", method="POST", data=data)
request_id = response["requestId"]
except Exception as error:
self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
else:
data = {"stageFirmware": False,
"skipMelCheck": self.ignore_health_check,
"cfwFile": self.firmware_name,
"nvsramFile": self.nvsram_name}
try:
rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=data)
request_id = response["requestId"]
except Exception as error:
self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
self.upgrade_in_progress = True
if self.wait_for_completion:
self.proxy_wait_for_upgrade(request_id)
def apply(self):
"""Upgrade controller firmware."""
self.check_system_health()
# Verify firmware compatibility and whether changes are required
if self.is_embedded():
self.embedded_check_compatibility()
else:
self.proxy_check_upgrade_required()
# This will upload the firmware files to the web services proxy but not to the controller
if self.upgrade_required:
self.proxy_upload_and_check_compatibility()
# Perform upgrade
if self.upgrade_required and not self.module.check_mode:
if self.is_embedded():
self.embedded_upgrade()
else:
self.proxy_upgrade()
self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, status=self.module_info)
def main():
firmware = NetAppESeriesFirmware()
firmware.apply()
if __name__ == '__main__':
main()
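The wait loops above share one poll-until-done shape: sleep, query, break on success, fail on timeout. A minimal standalone sketch of that pattern (editor's illustration with hypothetical timings; not part of the module):

    import time

    def wait_until(check, timeout_sec=900, interval_sec=5):
        """Poll check() until it returns truthy or the timeout elapses."""
        for _ in range(int(timeout_sec / interval_sec)):
            if check():
                return True
            time.sleep(interval_sec)
        return False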


@ -1,414 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_flashcache
author: Kevin Hulquest (@hulquest)
short_description: NetApp E-Series manage SSD caches
description:
- Create or remove SSD caches on a NetApp E-Series storage array.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
required: true
description:
- The ID of the array to manage (as configured on the web services proxy).
state:
required: true
description:
- Whether the specified SSD cache should exist or not.
choices: ['present', 'absent']
default: present
name:
required: true
description:
- The name of the SSD cache to manage
io_type:
description:
- The type of workload to optimize the cache for.
choices: ['filesystem','database','media']
default: filesystem
disk_count:
description:
- The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place.
size_unit:
description:
- The unit to be applied to size arguments
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: gb
cache_size_min:
description:
- The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache.
'''
EXAMPLES = """
- name: Flash Cache
netapp_e_flashcache:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
name: SSDCacheBuiltByAnsible
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: json for newly created flash cache
"""
import json
import logging
import sys
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import reduce
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
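# Editor's sketch of a typical call to the helper above (endpoint and credentials
# are hypothetical):
#   rc, data = request("https://192.168.1.100:8443/devmgr/v2/storage-systems/1/flash-cache",
#                      url_username="admin", url_password="adminpass",
#                      validate_certs=False, ignore_errors=True)
#   # rc is the HTTP status code; data is the decoded JSON body when one is returned.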
class NetAppESeriesFlashCache(object):
def __init__(self):
self.name = None
self.log_mode = None
self.log_path = None
self.api_url = None
self.api_username = None
self.api_password = None
self.ssid = None
self.validate_certs = None
self.disk_count = None
self.size_unit = None
self.cache_size_min = None
self.io_type = None
self.driveRefs = None
self.state = None
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
state=dict(default='present', choices=['present', 'absent'], type='str'),
ssid=dict(required=True, type='str'),
name=dict(required=True, type='str'),
disk_count=dict(type='int'),
disk_refs=dict(type='list'),
cache_size_min=dict(type='int'),
io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
type='str'),
criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
type='str'),
log_mode=dict(type='str'),
log_path=dict(type='str'),
))
self.module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
],
mutually_exclusive=[
],
# TODO: update validation for various selection criteria
supports_check_mode=True
)
self.__dict__.update(self.module.params)
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = self._logger.debug
if self.log_mode == 'file' and self.log_path:
logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
elif self.log_mode == 'stderr':
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
self.post_headers = dict(Accept="application/json")
self.post_headers['Content-Type'] = 'application/json'
def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
self.debug("getting candidate disks...")
drives_req = dict(
driveCount=disk_count,
sizeUnit=size_unit,
driveType='ssd',
)
if capacity:
drives_req['targetUsableCapacity'] = capacity
(rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
data=json.dumps(drives_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
if rc == 204:
self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')
disk_ids = [d['id'] for d in drives_resp]
bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)
return (disk_ids, bytes)
def create_cache(self):
(disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
capacity=self.cache_size_min)
self.debug("creating ssd cache...")
create_fc_req = dict(
driveRefs=disk_ids,
name=self.name
)
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
def update_cache(self):
self.debug('updating flash cache config...')
update_fc_req = dict(
name=self.name,
configType=self.io_type
)
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
def delete_cache(self):
self.debug('deleting flash cache...')
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs, ignore_errors=True)
@property
def needs_more_disks(self):
if len(self.cache_detail['driveRefs']) < self.disk_count:
self.debug("needs resize: current disk count %s < requested requested count %s",
len(self.cache_detail['driveRefs']), self.disk_count)
return True
@property
def needs_less_disks(self):
if len(self.cache_detail['driveRefs']) > self.disk_count:
self.debug("needs resize: current disk count %s < requested requested count %s",
len(self.cache_detail['driveRefs']), self.disk_count)
return True
@property
def current_size_bytes(self):
return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])
@property
def requested_size_bytes(self):
if self.cache_size_min:
return self.cache_size_min * self._size_unit_map[self.size_unit]
else:
return 0
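# Editor's worked example: with cache_size_min=100 and size_unit='gb', the requested
# minimum is 100 * 1024**3 = 107,374,182,400 bytes.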
@property
def needs_more_capacity(self):
if self.current_size_bytes < self.requested_size_bytes:
self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
self.current_size_bytes, self.requested_size_bytes)
return True
@property
def needs_resize(self):
return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks
def resize_cache(self):
# increase up to disk count first, then iteratively add disks until we meet requested capacity
# TODO: perform this calculation in check mode
current_disk_count = len(self.cache_detail['driveRefs'])
proposed_new_disks = 0
proposed_additional_bytes = 0
proposed_disk_ids = []
if self.needs_more_disks:
proposed_disk_count = self.disk_count - current_disk_count
(disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count)
proposed_additional_bytes = bytes
proposed_disk_ids = disk_ids
while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes:
proposed_new_disks += 1
(disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks)
proposed_disk_ids = disk_ids
proposed_additional_bytes = bytes
add_drives_req = dict(
driveRef=proposed_disk_ids
)
self.debug("adding drives to flash-cache...")
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid),
data=json.dumps(add_drives_req), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
elif self.needs_less_disks and self.driveRefs:
rm_drives = dict(driveRef=self.driveRefs)
(rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid),
data=json.dumps(rm_drives), headers=self.post_headers, method='POST',
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs)
def apply(self):
result = dict(changed=False)
(rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
url_username=self.api_username, url_password=self.api_password,
validate_certs=self.validate_certs, ignore_errors=True)
if rc == 200:
self.cache_detail = cache_resp
else:
self.cache_detail = None
if rc not in [200, 404]:
raise Exception(
"Unexpected error code %s fetching flash cache detail. Response data was %s" % (rc, cache_resp))
if self.state == 'present':
if self.cache_detail:
# TODO: verify parameters against detail for changes
if self.cache_detail['name'] != self.name:
self.debug("CHANGED: name differs")
result['changed'] = True
if self.cache_detail['flashCacheBase']['configType'] != self.io_type:
self.debug("CHANGED: io_type differs")
result['changed'] = True
if self.needs_resize:
self.debug("CHANGED: resize required")
result['changed'] = True
else:
self.debug("CHANGED: requested state is 'present' but cache does not exist")
result['changed'] = True
else: # requested state is absent
if self.cache_detail:
self.debug("CHANGED: requested state is 'absent' but cache exists")
result['changed'] = True
if not result['changed']:
self.debug("no changes, exiting...")
self.module.exit_json(**result)
if self.module.check_mode:
self.debug("changes pending in check mode, exiting early...")
self.module.exit_json(**result)
if self.state == 'present':
if not self.cache_detail:
self.create_cache()
else:
if self.needs_resize:
self.resize_cache()
# run update here as well, since io_type can't be set on creation
self.update_cache()
elif self.state == 'absent':
self.delete_cache()
# TODO: include other details about the storage pool (size, type, id, etc)
self.module.exit_json(changed=result['changed'], **self.resp)
def main():
sp = NetAppESeriesFlashCache()
try:
sp.apply()
except Exception as e:
sp.debug("Exception in apply(): \n%s", to_native(e))
sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e),
exception=traceback.format_exc())
if __name__ == '__main__':
main()
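resize_cache() grows the cache greedily: first up to the requested disk count, then one candidate disk at a time until the capacity minimum is met. The same loop in a self-contained sketch (editor's illustration; get_candidates stands in for get_candidate_disks and returns a (disk_ids, bytes) tuple):

    def grow_to_capacity(current_bytes, requested_bytes, get_candidates):
        """Request ever-larger candidate sets until the capacity minimum is met."""
        disks, added_bytes, count = [], 0, 0
        while current_bytes + added_bytes < requested_bytes:
            count += 1
            disks, added_bytes = get_candidates(count)
        return disks

    # With a fake candidate source offering 10 GiB per disk, growing a 5 GiB cache
    # to a 32 GiB minimum selects three disks:
    gib = 1024 ** 3
    print(grow_to_capacity(5 * gib, 32 * gib, lambda n: (list(range(n)), n * 10 * gib)))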


@ -1,157 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_global
short_description: NetApp E-Series manage global settings configuration
description:
- Allow the user to configure several of the global settings associated with an E-Series storage-system
author: Michael Price (@lmprice)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
name:
description:
- Set the name of the E-Series storage-system
- This label/name doesn't have to be unique.
- May be up to 30 characters in length.
aliases:
- label
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module requires Web Services API v1.3 or newer.
'''
EXAMPLES = """
- name: Set the storage-system name
netapp_e_global:
name: myArrayName
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The requested settings have been updated.
name:
description:
- The current name/label of the storage-system.
returned: on success
sample: myArrayName
type: str
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class GlobalSettings(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['label']),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.name and len(self.name) > 30:
self.module.fail_json(msg="The provided name is invalid, it must be < 30 characters in length.")
def get_name(self):
try:
(rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
if result['status'] in ['offline', 'neverContacted']:
self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
return result['name']
except Exception as err:
self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
def update_name(self):
name = self.get_name()
update = False
if self.name != name:
update = True
body = dict(name=self.name)
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
self._logger.info("Set name to %s.", result['name'])
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_name()
name = self.get_name()
self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = GlobalSettings()
settings()
if __name__ == '__main__':
main()
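The update path above is a read-compare-write cycle, which is what keeps the module idempotent. The same idea in miniature (editor's sketch; read_name and write_name are hypothetical stand-ins for the two request() calls):

    def ensure_name(desired, read_name, write_name, check_mode=False):
        """Return True (changed) only when an update was actually needed."""
        if read_name() == desired:
            return False
        if not check_mode:
            write_name(desired)
        return True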


@ -1,536 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_host
short_description: NetApp E-Series manage hosts
description: Create, update, remove hosts on NetApp E-Series storage arrays
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
name:
description:
- If the host doesn't yet exist, the label/name to assign at creation time.
- If the host already exists, this will be used to uniquely identify the host to make any required changes
required: True
aliases:
- label
state:
description:
- Set to absent to remove an existing host
- Set to present to modify or create a new host definition
choices:
- absent
- present
default: present
host_type:
description:
- This is the type of host to be mapped
- Required when C(state=present)
- Either one of the following names can be specified, Linux DM-MP, VMWare, Windows, Windows Clustered, or a
host type index which can be found in M(netapp_e_facts)
type: str
aliases:
- host_type_index
ports:
description:
- A list of host ports you wish to associate with the host.
- Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are
uniquely identified by a label and these must be unique.
required: False
suboptions:
type:
description:
- The interface type of the port to define.
- Acceptable choices depend on the capabilities of the target hardware/software platform.
required: true
choices:
- iscsi
- sas
- fc
- ib
- nvmeof
- ethernet
label:
description:
- A unique label to assign to this port assignment.
required: true
port:
description:
- The WWN or IQN of the hostPort to assign to this port definition.
required: true
force_port:
description:
- Allow ports that are already assigned to be re-assigned to your current host
required: false
type: bool
group:
description:
- The unique identifier of the host-group you want the host to be a member of; this is used for clustering.
required: False
aliases:
- cluster
log_path:
description:
- A local path to a file to be used for debug logging
required: False
'''
EXAMPLES = """
- name: Define or update an existing host named 'Host1'
netapp_e_host:
ssid: "1"
api_url: "10.113.1.101:8443"
api_username: admin
api_password: myPassword
name: "Host1"
state: present
host_type_index: Linux DM-MP
ports:
- type: 'iscsi'
label: 'PORT_1'
port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe'
- type: 'fc'
label: 'FC_1'
port: '10:00:FF:7C:FF:FF:FF:01'
- type: 'fc'
label: 'FC_2'
port: '10:00:FF:7C:FF:FF:FF:00'
- name: Ensure a host named 'Host2' doesn't exist
netapp_e_host:
ssid: "1"
api_url: "10.113.1.101:8443"
api_username: admin
api_password: myPassword
name: "Host2"
state: absent
"""
RETURN = """
msg:
description:
- A user-readable description of the actions performed.
returned: on success
type: str
sample: The host has been created.
id:
description:
- the unique identifier of the host on the E-Series storage-system
returned: on success when state=present
type: str
sample: 00000000600A098000AAC0C3003004700AD86A52
version_added: "2.6"
ssid:
description:
- the unique identifier of the E-Series storage-system with the current api
returned: on success
type: str
sample: 1
version_added: "2.6"
api_url:
description:
- the url of the API that this request was processed by
returned: on success
type: str
sample: https://webservices.example.com:8443
version_added: "2.6"
"""
import json
import logging
import re
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Host(object):
HOST_TYPE_INDEXES = {"linux dm-mp": 28, "vmware": 10, "windows": 1, "windows clustered": 8}
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
group=dict(type='str', required=False, aliases=['cluster']),
ports=dict(type='list', required=False),
force_port=dict(type='bool', default=False),
name=dict(type='str', required=True, aliases=['label']),
host_type_index=dict(type='str', aliases=['host_type']),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
self.check_mode = self.module.check_mode
args = self.module.params
self.group = args['group']
self.ports = args['ports']
self.force_port = args['force_port']
self.name = args['name']
self.state = args['state']
self.ssid = args['ssid']
self.url = args['api_url']
self.user = args['api_username']
self.pwd = args['api_password']
self.certs = args['validate_certs']
self.post_body = dict()
self.all_hosts = list()
self.host_obj = dict()
self.newPorts = list()
self.portsForUpdate = list()
self.portsForRemoval = list()
# Update host type with the corresponding index
host_type = args['host_type_index']
if host_type:
host_type = host_type.lower()
if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
elif host_type.isdigit():
self.host_type_index = int(args['host_type_index'])
else:
self.module.fail_json(msg="host_type must be either a host type name or host type index found integer"
" the documentation.")
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if args['log_path']:
logging.basicConfig(
level=logging.DEBUG, filename=args['log_path'], filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
# Ensure when state==present then host_type_index is defined
if self.state == "present" and self.host_type_index is None:
self.module.fail_json(msg="Host_type_index is required when state=='present'. Array Id: [%s]" % self.ssid)
# Fix port representation if they are provided with colons
if self.ports is not None:
for port in self.ports:
port['label'] = port['label'].lower()
port['type'] = port['type'].lower()
port['port'] = port['port'].lower()
# Determine whether address is 16-byte WWPN and, if so, remove
if re.match(r'^(0x)?[0-9a-f]{16}$', port['port'].replace(':', '')):
port['port'] = port['port'].replace(':', '').replace('0x', '')
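# Editor's example: a 16-byte WWPN supplied as '10:00:FF:7C:FF:FF:FF:01' is normalized
# here to '1000ff7cffffff01' (lower-cased above, colons and any leading '0x' removed),
# while an IQN such as 'iqn.1996-04.de.suse:01:56f86f9bd1fe' does not match the
# pattern and keeps its colons.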
def valid_host_type(self):
host_types = None
try:
(rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
try:
match = list(filter(lambda host_type: host_type['index'] == self.host_type_index, host_types))[0]
return True
except IndexError:
self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
def assigned_host_ports(self, apply_unassigning=False):
"""Determine if the hostPorts requested have already been assigned and return list of required used ports."""
used_host_ports = {}
for host in self.all_hosts:
if host['label'] != self.name:
for host_port in host['hostSidePorts']:
for port in self.ports:
if port['port'] == host_port["address"] or port['label'] == host_port['label']:
if not self.force_port:
self.module.fail_json(msg="There are no host ports available OR there are not enough"
" unassigned host ports")
else:
# Determine port reference
port_ref = [port["hostPortRef"] for port in host["ports"]
if port["hostPortName"] == host_port["address"]]
port_ref.extend([port["initiatorRef"] for port in host["initiators"]
if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
# Create dictionary of hosts containing list of port references
if host["hostRef"] not in used_host_ports.keys():
used_host_ports.update({host["hostRef"]: port_ref})
else:
used_host_ports[host["hostRef"]].extend(port_ref)
else:
for host_port in host['hostSidePorts']:
for port in self.ports:
if ((host_port['label'] == port['label'] and host_port['address'] != port['port']) or
(host_port['label'] != port['label'] and host_port['address'] == port['port'])):
if not self.force_port:
self.module.fail_json(msg="There are no host ports available OR there are not enough"
" unassigned host ports")
else:
# Determine port reference
port_ref = [port["hostPortRef"] for port in host["ports"]
if port["hostPortName"] == host_port["address"]]
port_ref.extend([port["initiatorRef"] for port in host["initiators"]
if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
# Create dictionary of hosts containing list of port references
if host["hostRef"] not in used_host_ports.keys():
used_host_ports.update({host["hostRef"]: port_ref})
else:
used_host_ports[host["hostRef"]].extend(port_ref)
# Unassign assigned ports
if apply_unassigning:
for host_ref in used_host_ports.keys():
try:
rc, resp = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, host_ref),
url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST',
data=json.dumps({"portsToRemove": used_host_ports[host_ref]}))
except Exception as err:
self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]."
" Error [%s]." % (self.host_obj['id'], self.ssid,
used_host_ports[host_ref], to_native(err)))
return used_host_ports
def group_id(self):
if self.group:
try:
(rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
try:
group_obj = list(filter(lambda group: group['name'] == self.group, all_groups))[0]
return group_obj['id']
except IndexError:
self.module.fail_json(msg="No group with the name: %s exists" % self.group)
else:
# Return the value equivalent of no group
return "0000000000000000000000000000000000000000"
def host_exists(self):
"""Determine if the requested host exists
As a side effect, set the full list of defined hosts in 'all_hosts', and the target host in 'host_obj'.
"""
match = False
all_hosts = list()
try:
(rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
# Augment the host objects
for host in all_hosts:
for port in host['hostSidePorts']:
port['type'] = port['type'].lower()
port['address'] = port['address'].lower()
port['label'] = port['label'].lower()
# Augment hostSidePorts with their ID (this is an omission in the API)
ports = dict((port['label'], port['id']) for port in host['ports'])
ports.update((port['label'], port['id']) for port in host['initiators'])
for host_side_port in host['hostSidePorts']:
if host_side_port['label'] in ports:
host_side_port['id'] = ports[host_side_port['label']]
if host['label'] == self.name:
self.host_obj = host
match = True
self.all_hosts = all_hosts
return match
def needs_update(self):
"""Determine whether we need to update the Host object
As a side effect, we will set the ports that we need to update (portsForUpdate), and the ports we need to add
(newPorts), on self.
"""
changed = False
if (self.host_obj["clusterRef"].lower() != self.group_id().lower() or
self.host_obj["hostTypeIndex"] != self.host_type_index):
self._logger.info("Either hostType or the clusterRef doesn't match, an update is required.")
changed = True
current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]})
for port in self.host_obj["hostSidePorts"])
if self.ports:
for port in self.ports:
for current_host_port_id in current_host_ports.keys():
if port == current_host_ports[current_host_port_id]:
current_host_ports.pop(current_host_port_id)
break
elif port["port"] == current_host_ports[current_host_port_id]["port"]:
if self.port_on_diff_host(port) and not self.force_port:
self.module.fail_json(msg="The port you specified [%s] is associated with a different host."
" Specify force_port as True or try a different port spec" % port)
if (port["label"] != current_host_ports[current_host_port_id]["label"] or
port["type"] != current_host_ports[current_host_port_id]["type"]):
current_host_ports.pop(current_host_port_id)
self.portsForUpdate.append({"portRef": current_host_port_id, "port": port["port"],
"label": port["label"], "hostRef": self.host_obj["hostRef"]})
break
else:
self.newPorts.append(port)
self.portsForRemoval = list(current_host_ports.keys())
changed = any([self.newPorts, self.portsForUpdate, self.portsForRemoval, changed])
return changed
def port_on_diff_host(self, arg_port):
""" Checks to see if a passed in port arg is present on a different host """
for host in self.all_hosts:
# Only check 'other' hosts
if host['name'] != self.name:
for port in host['hostSidePorts']:
# Check if the port label is found in the port dict list of each host
if arg_port['label'] == port['label'] or arg_port['port'] == port['address']:
self.other_host = host
return True
return False
def update_host(self):
self._logger.info("Beginning the update for host=%s.", self.name)
if self.ports:
# Remove ports that need reassigning from their current host.
self.assigned_host_ports(apply_unassigning=True)
self.post_body["portsToUpdate"] = self.portsForUpdate
self.post_body["ports"] = self.newPorts
self._logger.info("Requested ports: %s", pformat(self.ports))
else:
self._logger.info("No host ports were defined.")
if self.group:
self.post_body['groupId'] = self.group_id()
self.post_body['hostType'] = dict(index=self.host_type_index)
api = self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id'])
self._logger.info("POST => url=%s, body=%s.", api, pformat(self.post_body))
if not self.check_mode:
try:
(rc, self.host_obj) = request(api, url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
except Exception as err:
self.module.fail_json(
msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=True, **payload)
def create_host(self):
self._logger.info("Creating host definition.")
# Remove ports that need reassigning from their current host.
self.assigned_host_ports(apply_unassigning=True)
# needs_reassignment = False
post_body = dict(
name=self.name,
hostType=dict(index=self.host_type_index),
groupId=self.group_id(),
)
if self.ports:
post_body.update(ports=self.ports)
api = self.url + "storage-systems/%s/hosts" % self.ssid
self._logger.info('POST => url=%s, body=%s', api, pformat(post_body))
if not self.check_mode:
if not self.host_exists():
try:
(rc, self.host_obj) = request(api, method='POST', url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
data=json.dumps(post_body), headers=HEADERS)
except Exception as err:
self.module.fail_json(
msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
else:
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name), **payload)
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=True, msg='Host created.', **payload)
def remove_host(self):
try:
(rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
method='DELETE',
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(
msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
self.ssid,
to_native(err)))
def build_success_payload(self, host=None):
keys = ['id']
if host is not None:
result = dict((key, host[key]) for key in keys)
else:
result = dict()
result['ssid'] = self.ssid
result['api_url'] = self.url
return result
def apply(self):
if self.state == 'present':
if self.host_exists():
if self.needs_update() and self.valid_host_type():
self.update_host()
else:
payload = self.build_success_payload(self.host_obj)
self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload)
elif self.valid_host_type():
self.create_host()
else:
payload = self.build_success_payload()
if self.host_exists():
self.remove_host()
self.module.exit_json(changed=True, msg="Host removed.", **payload)
else:
self.module.exit_json(changed=False, msg="Host already absent.", **payload)
def main():
host = Host()
host.apply()
if __name__ == '__main__':
main()
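For reference, the update issued by update_host() reduces to a single authenticated POST
against the endpoint shown above. A minimal standalone sketch, assuming the third-party
requests package plus hypothetical credentials and identifiers (only the endpoint and the
payload shape come from the module):

import requests  # assumption: the third-party requests package is installed

api_url = "https://localhost:8443/devmgr/v2/"  # hypothetical Web Services URL
host_ref = "84000000600A098000A4B28D0030101544233C08"  # hypothetical host reference
body = {"name": "host1", "hostType": {"index": 28}, "ports": []}  # index 28 is a placeholder
resp = requests.post(api_url + "storage-systems/1/hosts/%s" % host_ref,
                     json=body, auth=("admin", "myPass"), verify=False)
resp.raise_for_status()
print(resp.json())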

@@ -1,302 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"}
DOCUMENTATION = '''
---
module: netapp_e_hostgroup
short_description: NetApp E-Series manage array host groups
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
description: Create, update or destroy host groups on a NetApp E-Series storage array.
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
required: true
description:
- Whether the specified host group should exist or not.
choices: ["present", "absent"]
name:
required: false
description:
- Name of the host group to manage
- This option is mutually exclusive with I(id).
new_name:
required: false
description:
- Specify this when you need to update the name of a host group
id:
required: false
description:
- Host reference identifier for the host group to manage.
- This option is mutually exclusive with I(name).
hosts:
required: false
description:
- List of host names/labels to add to the group
'''
EXAMPLES = """
- name: Configure Hostgroup
netapp_e_hostgroup:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
"""
RETURN = """
clusterRef:
description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
returned: always except when state is absent
type: str
sample: "3233343536373839303132333100000000000000"
confirmLUNMappingCreation:
description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
will alter the volume access rights of other clusters, in addition to this one.
returned: always
type: bool
sample: false
hosts:
description: A list of the hosts that are part of the host group after all operations.
returned: always except when state is absent
type: list
sample: ["HostA","HostB"]
id:
description: The id number of the hostgroup
returned: always except when state is absent
type: str
sample: "3233343536373839303132333100000000000000"
isSAControlled:
description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
returned: always except when state is absent
type: bool
sample: false
label:
description: The user-assigned, descriptive label string for the cluster.
returned: always
type: str
sample: "MyHostGroup"
name:
description: Same as I(label).
returned: always except when state is absent
type: str
sample: "MyHostGroup"
protectionInformationCapableAccessMethod:
description: This field is true if the host has a PI capable access method.
returned: always except when state is absent
type: bool
sample: true
"""
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
from ansible.module_utils._text import to_native
class NetAppESeriesHostGroup(NetAppESeriesModule):
EXPANSION_TIMEOUT_SEC = 10
DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
def __init__(self):
version = "02.00.0000.0000"
ansible_options = dict(
state=dict(required=True, choices=["present", "absent"], type="str"),
name=dict(required=False, type="str"),
new_name=dict(required=False, type="str"),
id=dict(required=False, type="str"),
hosts=dict(required=False, type="list"))
mutually_exclusive = [["name", "id"]]
super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options,
web_services_version=version,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive)
args = self.module.params
self.state = args["state"]
self.name = args["name"]
self.new_name = args["new_name"]
self.id = args["id"]
self.hosts_list = args["hosts"]
self.current_host_group = None
@property
def hosts(self):
"""Retrieve a list of host reference identifiers should be associated with the host group."""
host_list = []
existing_hosts = []
if self.hosts_list:
try:
rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
for host in self.hosts_list:
for existing_host in existing_hosts:
if host in existing_host["id"] or host in existing_host["name"]:
host_list.append(existing_host["id"])
break
else:
self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]."
% (self.ssid, host))
return host_list
@property
def host_groups(self):
"""Retrieve a list of existing host groups."""
host_groups = []
hosts = []
try:
rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid)
rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
for group in host_groups:
hosts_ids = []
for host in hosts:
if group["id"] == host["clusterRef"]:
hosts_ids.append(host["hostRef"])
group.update({"hosts": hosts_ids})
return host_groups
@property
def current_hosts_in_host_group(self):
"""Retrieve the current hosts associated with the current hostgroup."""
current_hosts = []
for group in self.host_groups:
if (self.name and group["name"] == self.name) or (self.id and group["id"] == self.id):
current_hosts = group["hosts"]
return current_hosts
def unassign_hosts(self, host_list=None):
"""Unassign hosts from host group."""
if host_list is None:
host_list = self.current_host_group["hosts"]
for host_id in host_list:
try:
rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
method="POST", data={"group": "0000000000000000000000000000000000000000"})
except Exception as error:
self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
" Error[%s]." % (self.ssid, host_id, to_native(error)))
def delete_host_group(self, unassign_hosts=True):
"""Delete host group"""
if unassign_hosts:
self.unassign_hosts()
try:
rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]),
method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
def create_host_group(self):
"""Create host group."""
data = {"name": self.name, "hosts": self.hosts}
response = None
try:
rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
except Exception as error:
self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return response
def update_host_group(self):
"""Update host group."""
data = {"name": self.new_name if self.new_name else self.name,
"hosts": self.hosts}
# unassign hosts that should not be part of the hostgroup
desired_host_ids = self.hosts
for host in self.current_hosts_in_host_group:
if host not in desired_host_ids:
self.unassign_hosts([host])
update_response = None
try:
rc, update_response = self.request("storage-systems/%s/host-groups/%s"
% (self.ssid, self.current_host_group["id"]), method="POST", data=data)
except Exception as error:
self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return update_response
def apply(self):
"""Apply desired host group state to the storage array."""
changes_required = False
# Search for existing host group match
for group in self.host_groups:
if (self.id and group["id"] == self.id) or (self.name and group["name"] == self.name):
self.current_host_group = group
# Determine whether changes are required
if self.state == "present":
if self.current_host_group:
if (self.new_name and self.new_name != self.name) or self.hosts != self.current_host_group["hosts"]:
changes_required = True
else:
if not self.name:
self.module.fail_json(msg="The option name must be supplied when creating a new host group."
" Array id [%s]." % self.ssid)
changes_required = True
elif self.current_host_group:
changes_required = True
# Apply any necessary changes
msg = ""
if changes_required and not self.module.check_mode:
msg = "No changes required."
if self.state == "present":
if self.current_host_group:
if ((self.new_name and self.new_name != self.name) or
(self.hosts != self.current_host_group["hosts"])):
msg = self.update_host_group()
else:
msg = self.create_host_group()
elif self.current_host_group:
self.delete_host_group()
msg = "Host group deleted. Array Id [%s]. Host Name [%s]. Host Id [%s]."\
% (self.ssid, self.current_host_group["name"], self.current_host_group["id"])
self.module.exit_json(msg=msg, changed=changes_required)
def main():
hostgroup = NetAppESeriesHostGroup()
hostgroup.apply()
if __name__ == "__main__":
main()
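Similarly, the unassignment performed by unassign_hosts() is a single call to the move
endpoint; in this sketch the credentials and host reference are hypothetical, while the
all-zero group reference (the array's default group) is taken from the module:

import requests  # assumption: the third-party requests package is installed

api_url = "https://localhost:8443/devmgr/v2/"  # hypothetical Web Services URL
host_ref = "84000000600A098000A4B28D0030101544233C08"  # hypothetical host reference
resp = requests.post(api_url + "storage-systems/1/hosts/%s/move" % host_ref,
                     json={"group": "0000000000000000000000000000000000000000"},
                     auth=("admin", "myPass"), verify=False)
resp.raise_for_status()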

@@ -1,398 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_iscsi_interface
short_description: NetApp E-Series manage iSCSI interface configuration
description:
- Configure settings of an E-Series iSCSI interface
author: Michael Price (@lmprice)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
controller:
description:
- The controller that owns the port you want to configure.
- Controller names are presented alphabetically, with the first controller as A,
the second as B, and so on.
- Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
limitation and could change in the future.
required: yes
choices:
- A
- B
name:
description:
- The channel of the port to modify the configuration of.
- The list of choices is not necessarily comprehensive. It depends on the number of ports
that are available in the system.
- The numerical value represents the number of the channel (typically from left to right on the HIC),
beginning with a value of 1.
required: yes
aliases:
- channel
state:
description:
- When enabled, the provided configuration will be utilized.
- When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
choices:
- enabled
- disabled
default: enabled
address:
description:
- The IPv4 address to assign to the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
subnet_mask:
description:
- The subnet mask to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
gateway:
description:
- The IPv4 gateway address to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
config_method:
description:
- The configuration method type to use for this interface.
- dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
choices:
- dhcp
- static
default: dhcp
mtu:
description:
- The maximum transmission units (MTU), in bytes.
- This allows you to configure a larger value for the MTU, in order to enable jumbo frames
(any value > 1500).
- Generally, it is necessary to have your host, switches, and other components not only support jumbo
frames, but also have them configured properly. Therefore, unless you know what you're doing, it's best to
leave this at the default.
default: 1500
aliases:
- max_frame_size
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
via dhcp, etc), can take seconds or minutes longer to take effect.
- This module will not be useful/usable on an E-Series system without any iSCSI interfaces.
- This module requires a Web Services API version of >= 1.3.
'''
EXAMPLES = """
- name: Configure the first port on the A controller with a static IPv4 address
netapp_e_iscsi_interface:
name: "1"
controller: "A"
config_method: static
address: "192.168.1.100"
subnet_mask: "255.255.255.0"
gateway: "192.168.1.1"
ssid: "1"
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable ipv4 connectivity for the second port on the B controller
netapp_e_iscsi_interface:
name: "2"
controller: "B"
state: disabled
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
- name: Enable jumbo frames for the first 4 ports on controller A
netapp_e_iscsi_interface:
name: "{{ item | int }}"
controller: "A"
state: enabled
mtu: 9000
config_method: dhcp
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
loop:
- 1
- 2
- 3
- 4
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The interface settings have been updated.
enabled:
description:
- Indicates whether IPv4 connectivity has been enabled or disabled.
- This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance,
it is unlikely that the configuration will actually be valid.
returned: on success
sample: True
type: bool
"""
import json
import logging
from pprint import pformat
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiInterface(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
controller=dict(type='str', required=True, choices=['A', 'B']),
name=dict(type='int', aliases=['channel']),
state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
address=dict(type='str', required=False),
subnet_mask=dict(type='str', required=False),
gateway=dict(type='str', required=False),
config_method=dict(type='str', required=False, default='dhcp', choices=['dhcp', 'static']),
mtu=dict(type='int', default=1500, required=False, aliases=['max_frame_size']),
log_path=dict(type='str', required=False),
))
required_if = [
["config_method", "static", ["address", "subnet_mask"]],
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, )
args = self.module.params
self.controller = args['controller']
self.name = args['name']
self.mtu = args['mtu']
self.state = args['state']
self.address = args['address']
self.subnet_mask = args['subnet_mask']
self.gateway = args['gateway']
self.config_method = args['config_method']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.mtu < 1500 or self.mtu > 9000:
self.module.fail_json(msg="The provided mtu is invalid, it must be > 1500 and < 9000 bytes.")
if self.config_method == 'dhcp' and any([self.address, self.subnet_mask, self.gateway]):
self.module.fail_json(msg='A config_method of dhcp is mutually exclusive with the address,'
' subnet_mask, and gateway options.')
# A relatively primitive regex to validate that the input is formatted like a valid ip address
address_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
if self.address and not address_regex.match(self.address):
self.module.fail_json(msg="An invalid ip address was provided for address.")
if self.subnet_mask and not address_regex.match(self.subnet_mask):
self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
if self.gateway and not address_regex.match(self.gateway):
self.module.fail_json(msg="An invalid ip address was provided for gateway.")
@property
def interfaces(self):
ifaces = list()
try:
(rc, ifaces) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
# Filter out non-iSCSI interfaces
ifaces = [iface['iscsi'] for iface in ifaces if iface['interfaceType'] == 'iscsi']
return ifaces
def get_controllers(self):
"""Retrieve a mapping of controller labels to their references
{
'A': '070000000000000000000001',
'B': '070000000000000000000002',
}
:return: the controllers defined on the system
"""
controllers = list()
try:
(rc, controllers) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/id'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
controllers.sort()
controllers_dict = {}
i = ord('A')
for controller in controllers:
label = chr(i)
controllers_dict[label] = controller
i += 1
return controllers_dict
def fetch_target_interface(self):
interfaces = self.interfaces
for iface in interfaces:
if iface['channel'] == self.name and self.controllers[self.controller] == iface['controllerId']:
return iface
channels = sorted(set((str(iface['channel'])) for iface in interfaces
if self.controllers[self.controller] == iface['controllerId']))
self.module.fail_json(msg="The requested channel of %s is not valid. Valid channels include: %s."
% (self.name, ", ".join(channels)))
def make_update_body(self, target_iface):
body = dict(iscsiInterface=target_iface['id'])
update_required = False
self._logger.info("Requested state=%s.", self.state)
self._logger.info("config_method: current=%s, requested=%s",
target_iface['ipv4Data']['ipv4AddressConfigMethod'], self.config_method)
if self.state == 'enabled':
settings = dict()
if not target_iface['ipv4Enabled']:
update_required = True
settings['ipv4Enabled'] = [True]
if self.mtu != target_iface['interfaceData']['ethernetData']['maximumFramePayloadSize']:
update_required = True
settings['maximumFramePayloadSize'] = [self.mtu]
if self.config_method == 'static':
ipv4Data = target_iface['ipv4Data']['ipv4AddressData']
if ipv4Data['ipv4Address'] != self.address:
update_required = True
settings['ipv4Address'] = [self.address]
if ipv4Data['ipv4SubnetMask'] != self.subnet_mask:
update_required = True
settings['ipv4SubnetMask'] = [self.subnet_mask]
if self.gateway is not None and ipv4Data['ipv4GatewayAddress'] != self.gateway:
update_required = True
settings['ipv4GatewayAddress'] = [self.gateway]
if target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configStatic':
update_required = True
settings['ipv4AddressConfigMethod'] = ['configStatic']
elif (target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configDhcp'):
update_required = True
settings.update(dict(ipv4Enabled=[True],
ipv4AddressConfigMethod=['configDhcp']))
body['settings'] = settings
else:
if target_iface['ipv4Enabled']:
update_required = True
body['settings'] = dict(ipv4Enabled=[False])
self._logger.info("Update required ?=%s", update_required)
self._logger.info("Update body: %s", pformat(body))
return update_required, body
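# For illustration, a static-IPv4 request body produced by make_update_body() looks like the
# following (interface id hypothetical); note the symbol API expects every setting wrapped
# in a single-element list:
#     {"iscsiInterface": "2000070000000000000000000001",
#      "settings": {"ipv4Enabled": [True],
#                   "maximumFramePayloadSize": [9000],
#                   "ipv4Address": ["192.168.1.100"],
#                   "ipv4SubnetMask": ["255.255.255.0"],
#                   "ipv4AddressConfigMethod": ["configStatic"]}}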
def update(self):
self.controllers = self.get_controllers()
if self.controller not in self.controllers:
self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s."
% ", ".join(self.controllers.keys()))
iface_before = self.fetch_target_interface()
update_required, body = self.make_update_body(iface_before)
if update_required and not self.check_mode:
try:
url = (self.url +
'storage-systems/%s/symbol/setIscsiInterfaceProperties' % self.ssid)
(rc, result) = request(url, method='POST', data=json.dumps(body), headers=HEADERS, timeout=300,
ignore_errors=True, **self.creds)
# We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook
# is cancelled mid-flight), that it isn't worth the complexity.
if rc == 422 and result['retcode'] in ['busy', '3']:
self.module.fail_json(
msg="The interface is currently busy (probably processing a previously requested modification"
" request). This operation cannot currently be completed. Array Id [%s]. Error [%s]."
% (self.ssid, result))
# Handle authentication issues, etc.
elif rc != 200:
self.module.fail_json(
msg="Failed to modify the interface! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(result)))
self._logger.debug("Update request completed successfully.")
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
iface_after = self.fetch_target_interface()
self.module.exit_json(msg="The interface settings have been updated.", changed=update_required,
enabled=iface_after['ipv4Enabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiInterface()
iface()
if __name__ == '__main__':
main()

@@ -1,294 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_iscsi_target
short_description: NetApp E-Series manage iSCSI target configuration
description:
- Configure the settings of an E-Series iSCSI target
author: Michael Price (@lmprice)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
name:
description:
- The name/alias to assign to the iSCSI target.
- This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
aliases:
- alias
ping:
description:
- Enable ICMP ping responses from the configured iSCSI ports.
type: bool
default: yes
chap_secret:
description:
- Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
- When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
whether or not the password has changed.
- The chap secret may only use ascii characters with values between 32 and 126 decimal.
- The chap secret must be no less than 12 characters, but no greater than 57 characters in length.
- The chap secret is cleared when not specified or an empty string.
aliases:
- chap
- password
unnamed_discovery:
description:
- When an initiator initiates a discovery session to an initiator port, it is considered an unnamed
discovery session if the iSCSI target iqn is not specified in the request.
- This option may be disabled to increase security if desired.
type: bool
default: yes
log_path:
description:
- A local path (on the Ansible controller), to a file to be used for debug logging.
required: no
notes:
- Check mode is supported.
- Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
M(netapp_e_iscsi_interface).
- This module requires a Web Services API version of >= 1.3.
'''
EXAMPLES = """
- name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
netapp_e_iscsi_target:
api_url: "https://localhost:8443/devmgr/v2"
api_username: admin
api_password: myPassword
ssid: "1"
validate_certs: no
name: myTarget
ping: yes
unnamed_discovery: yes
- name: Set the target alias and the CHAP secret
netapp_e_iscsi_target:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
name: myTarget
chap: password1234
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The iSCSI target settings have been updated.
alias:
description:
- The alias assigned to the iSCSI target.
returned: on success
sample: myArray
type: str
iqn:
description:
- The iqn (iSCSI Qualified Name) assigned to the iSCSI target.
returned: on success
sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
type: str
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiTarget(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['alias']),
ping=dict(type='bool', required=False, default=True),
chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True),
unnamed_discovery=dict(type='bool', required=False, default=True),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ping = args['ping']
self.chap_secret = args['chap_secret']
self.unnamed_discovery = args['unnamed_discovery']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.chap_secret:
if len(self.chap_secret) < 12 or len(self.chap_secret) > 57:
self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57"
" characters in length.")
for c in self.chap_secret:
ordinal = ord(c)
if ordinal < 32 or ordinal > 126:
self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
" characters with decimal values between 32 and 126.")
@property
def target(self):
"""Provide information on the iSCSI Target configuration
Sample:
{
'alias': 'myCustomName',
'ping': True,
'unnamed_discovery': True,
'chap': False,
'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45',
}
"""
target = dict()
try:
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target'
% self.ssid, headers=HEADERS, **self.creds)
# This likely isn't an iSCSI-enabled system
if not data:
self.module.fail_json(
msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid))
data = data[0]
chap = any(
[auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap'])
target.update(dict(alias=data['alias']['iscsiAlias'],
iqn=data['nodeName']['iscsiNodeName'],
chap=chap))
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData'
% self.ssid, headers=HEADERS, **self.creds)
data = data[0]
target.update(dict(ping=data['icmpPingResponseEnabled'],
unnamed_discovery=data['unnamedDiscoverySessionsEnabled']))
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return target
def apply_iscsi_settings(self):
"""Update the iSCSI target alias and CHAP settings"""
update = False
target = self.target
body = dict()
if self.name is not None and self.name != target['alias']:
update = True
body['alias'] = self.name
# If the CHAP secret was provided, we trigger an update.
if self.chap_secret:
update = True
body.update(dict(enableChapAuthentication=True,
chapSecret=self.chap_secret))
# If no secret was provided, then we disable chap
elif target['chap']:
update = True
body.update(dict(enableChapAuthentication=False))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def apply_target_changes(self):
update = False
target = self.target
body = dict()
if self.ping != target['ping']:
update = True
body['icmpPingResponseEnabled'] = self.ping
if self.unnamed_discovery != target['unnamed_discovery']:
update = True
body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST',
data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
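# For illustration, the serialized bodies POSTed by the two methods above look like this
# (values hypothetical):
#     target-settings: {"alias": "myTarget", "enableChapAuthentication": true,
#                       "chapSecret": "superSecret123"}
#     entity:          {"icmpPingResponseEnabled": true,
#                       "unnamedDiscoverySessionsEnabled": false}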
def update(self):
update = self.apply_iscsi_settings()
update = self.apply_target_changes() or update
target = self.target
data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
self.module.exit_json(msg="The interface settings have been updated.", changed=update, **data)
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiTarget()
iface()
if __name__ == '__main__':
main()

@@ -1,390 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_ldap
short_description: NetApp E-Series manage LDAP integration to use for authentication
description:
- Configure an E-Series system to allow authentication via an LDAP server
author: Michael Price (@lmprice)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
choices:
- present
- absent
default: present
identifier:
description:
- This is a unique identifier for the configuration (for cases where there are multiple domains configured).
- If this is not specified, but I(state=present), we will utilize a default value of 'default'.
username:
description:
- This is the user account that will be used for querying the LDAP server.
- "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
required: yes
aliases:
- bind_username
password:
description:
- This is the password for the bind user account.
required: yes
aliases:
- bind_password
attributes:
description:
- The user attributes that should be considered for the group to role mapping.
- Typically this is used with something like 'memberOf', and a user's access is tested against group
membership or lack thereof.
default: memberOf
server:
description:
- This is the LDAP server url.
- The connection string should be specified as using the ldap or ldaps protocol along with the port
information.
aliases:
- server_url
required: yes
name:
description:
- The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
- Defaults to the DNS name of the I(server).
- The only requirement is that the name[s] be resolvable.
- "Example: user@example.com"
required: no
search_base:
description:
- The search base is used to find group memberships of the user.
- "Example: ou=users,dc=example,dc=com"
required: yes
role_mappings:
description:
- This is where you specify which groups should have access to what permissions for the
storage-system.
- For example, all users in group A will be assigned all 4 available roles, which will allow access
to all the management functionality of the system (super-user). Those in group B only have the
storage.monitor role, which will allow only read-only access.
- This is specified as a mapping of regular expressions to a list of roles. See the examples.
- The roles that will be assigned to the group/groups matching the provided regex.
- storage.admin allows users full read/write access to storage objects and operations.
- storage.monitor allows users read-only access to storage objects and operations.
- support.admin allows users access to hardware, diagnostic information, the Major Event
Log, and other critical support-related functionality, but not the storage configuration.
- security.admin allows users access to authentication/authorization configuration, as well
as the audit log configuration, and certification management.
required: yes
user_attribute:
description:
- This is the attribute we will use to match the provided username when a user attempts to
authenticate.
default: sAMAccountName
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
different (or no) access to certain aspects of the system and API.
- The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
- Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
the system for using LDAP authentication; every implementation is likely to be very different.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
v3.0 and higher.
'''
EXAMPLES = '''
- name: Disable LDAP authentication
netapp_e_ldap:
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
ssid: "1"
state: absent
- name: Remove the 'default' LDAP domain configuration
netapp_e_ldap:
state: absent
identifier: default
- name: Define a new LDAP domain, utilizing defaults where possible
netapp_e_ldap:
state: present
bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
bind_password: "mySecretPass"
server: "ldap://example.com:389"
search_base: 'OU=Users,DC=example,DC=com'
role_mappings:
".*dist-dev-storage.*":
- storage.admin
- security.admin
- support.admin
- storage.monitor
'''
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The ldap settings have been updated.
"""
import json
import logging
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
class Ldap(object):
NO_CHANGE_MSG = "No changes were necessary."
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='present',
choices=['present', 'absent']),
identifier=dict(type='str', required=False, ),
username=dict(type='str', required=False, aliases=['bind_username']),
password=dict(type='str', required=False, aliases=['bind_password'], no_log=True),
name=dict(type='list', required=False, ),
server=dict(type='str', required=False, aliases=['server_url']),
search_base=dict(type='str', required=False, ),
role_mappings=dict(type='dict', required=False, ),
user_attribute=dict(type='str', required=False, default='sAMAccountName'),
attributes=dict(type='list', default=['memberOf'], required=False, ),
log_path=dict(type='str', required=False),
))
required_if = [
["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]]
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
args = self.module.params
self.ldap = args['state'] == 'present'
self.identifier = args['identifier']
self.username = args['username']
self.password = args['password']
self.names = args['name']
self.server = args['server']
self.search_base = args['search_base']
self.role_mappings = args['role_mappings']
self.user_attribute = args['user_attribute']
self.attributes = args['attributes']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'],
timeout=60)
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
self.embedded = None
self.base_path = None
def make_configuration(self):
if not self.identifier:
self.identifier = 'default'
if not self.names:
parts = urlparse.urlparse(self.server)
netloc = parts.netloc
if ':' in netloc:
netloc = netloc.split(':')[0]
self.names = [netloc]
roles = list()
for regex in self.role_mappings:
for role in self.role_mappings[regex]:
roles.append(dict(groupRegex=regex,
ignoreCase=True,
name=role))
domain = dict(id=self.identifier,
ldapUrl=self.server,
bindLookupUser=dict(user=self.username, password=self.password),
roleMapCollection=roles,
groupAttributes=self.attributes,
names=self.names,
searchBase=self.search_base,
userAttribute=self.user_attribute,
)
return domain
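# For illustration, the role_mappings from the EXAMPLES section above expand into the
# following roleMapCollection (the remaining fields follow the module defaults):
#     [{"groupRegex": ".*dist-dev-storage.*", "ignoreCase": True, "name": "storage.admin"},
#      {"groupRegex": ".*dist-dev-storage.*", "ignoreCase": True, "name": "security.admin"},
#      {"groupRegex": ".*dist-dev-storage.*", "ignoreCase": True, "name": "support.admin"},
#      {"groupRegex": ".*dist-dev-storage.*", "ignoreCase": True, "name": "storage.monitor"}]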
def is_embedded(self):
"""Determine whether or not we're using the embedded or proxy implementation of Web Services"""
if self.embedded is None:
url = self.url
try:
parts = urlparse.urlparse(url)
parts = parts._replace(path='/devmgr/utils/')
url = urlparse.urlunparse(parts)
(rc, result) = request(url + 'about', **self.creds)
self.embedded = not result['runningAsProxy']
except Exception as err:
self._logger.exception("Failed to retrieve the About information.")
self.module.fail_json(msg="Failed to determine the Web Services implementation type!"
" Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return self.embedded
def get_full_configuration(self):
try:
(rc, result) = request(self.url + self.base_path, **self.creds)
return result
except Exception as err:
self._logger.exception("Failed to retrieve the LDAP configuration.")
self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def get_configuration(self, identifier):
try:
(rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds)
if rc == 200:
return result
elif rc == 404:
return None
else:
self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, result))
except Exception as err:
self._logger.exception("Failed to retrieve the LDAP configuration.")
self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
# Define a new domain based on the user input
domain = self.make_configuration()
# This is the current list of configurations
current = self.get_configuration(self.identifier)
update = current != domain
msg = "No changes were necessary for [%s]." % self.identifier
self._logger.info("Is updated: %s", update)
if update and not self.check_mode:
msg = "The configuration changes were made for [%s]." % self.identifier
try:
if current is None:
api = self.base_path + 'addDomain'
else:
api = self.base_path + '%s' % (domain['id'])
(rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds)
except Exception as err:
self._logger.exception("Failed to modify the LDAP configuration.")
self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return msg, update
def clear_single_configuration(self, identifier=None):
if identifier is None:
identifier = self.identifier
configuration = self.get_configuration(identifier)
updated = False
msg = self.NO_CHANGE_MSG
if configuration:
updated = True
msg = "The LDAP domain configuration for [%s] was cleared." % identifier
if not self.check_mode:
try:
(rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds)
except Exception as err:
self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return msg, updated
def clear_configuration(self):
configuration = self.get_full_configuration()
updated = False
msg = self.NO_CHANGE_MSG
if configuration['ldapDomains']:
updated = True
msg = "The LDAP configuration for all domains was cleared."
if not self.check_mode:
try:
(rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds)
# Older versions of the NetApp E-Series REST API do not provide a call to remove all existing configurations
if rc == 405:
for config in configuration['ldapDomains']:
self.clear_single_configuration(config['id'])
except Exception as err:
self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return msg, updated
def get_base_path(self):
embedded = self.is_embedded()
if embedded:
return 'storage-systems/%s/ldap/' % self.ssid
else:
return '/ldap/'
def update(self):
self.base_path = self.get_base_path()
if self.ldap:
msg, update = self.update_configuration()
elif self.identifier:
msg, update = self.clear_single_configuration()
else:
msg, update = self.clear_configuration()
self.module.exit_json(msg=msg, changed=update, )
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Ldap()
settings()
if __name__ == '__main__':
main()

@@ -1,284 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_lun_mapping
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
short_description: NetApp E-Series create, delete, or modify lun mappings
description:
- Create, delete, or modify mappings between a volume and a targeted host/host+ group.
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Present will ensure the mapping exists, absent will remove the mapping.
required: True
choices: ["present", "absent"]
target:
description:
- The name of host or hostgroup you wish to assign to the mapping
- If omitted, the default hostgroup is used.
- If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
required: False
volume_name:
description:
- The name of the volume you wish to include in the mapping.
required: True
aliases:
- volume
lun:
description:
- The LUN value you wish to give the mapping.
- If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
- The LUN value will be determined by the storage-system when not specified.
required: no
target_type:
description:
- This option specifies whether the target should be a host or a group of hosts.
- Only necessary when the target name is used for both a host and a group of hosts.
choices:
- host
- group
required: no
'''
EXAMPLES = '''
---
- name: Map volume1 to the host target host1
netapp_e_lun_mapping:
ssid: 1
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: no
state: present
target: host1
volume: volume1
- name: Delete the lun mapping between volume1 and host1
netapp_e_lun_mapping:
ssid: 1
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: yes
state: absent
target: host1
volume: volume1
'''
RETURN = '''
msg:
description: success of the module
returned: always
type: str
sample: Lun mapping is complete
'''
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
class LunMapping(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=["present", "absent"]),
target=dict(required=False, default=None),
volume_name=dict(required=True, aliases=["volume"]),
lun=dict(type="int", required=False),
target_type=dict(required=False, choices=["host", "group"])))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
args = self.module.params
self.state = args["state"] in ["present"]
self.target = args["target"]
self.volume = args["volume_name"]
self.lun = args["lun"]
self.target_type = args["target_type"]
self.ssid = args["ssid"]
self.url = args["api_url"]
self.check_mode = self.module.check_mode
self.creds = dict(url_username=args["api_username"],
url_password=args["api_password"],
validate_certs=args["validate_certs"])
self.mapping_info = None
if not self.url.endswith('/'):
self.url += '/'
def update_mapping_info(self):
"""Collect the current state of the storage array."""
response = None
try:
rc, response = request(self.url + "storage-systems/%s/graph" % self.ssid,
method="GET", headers=HEADERS, **self.creds)
except Exception as error:
self.module.fail_json(
msg="Failed to retrieve storage array graph. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
# Create dictionary containing host/cluster references mapped to their names
target_reference = {}
target_name = {}
target_type = {}
if self.target_type is None or self.target_type == "host":
for host in response["storagePoolBundle"]["host"]:
target_reference.update({host["hostRef"]: host["name"]})
target_name.update({host["name"]: host["hostRef"]})
target_type.update({host["name"]: "host"})
if self.target_type is None or self.target_type == "group":
for cluster in response["storagePoolBundle"]["cluster"]:
# Verify there is no ambiguity in the target's type (i.e. a host and a group share the same name)
if self.target and self.target_type is None and cluster["name"] == self.target and \
self.target in target_name.keys():
self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group"
" targets! Id [%s]" % self.ssid)
target_reference.update({cluster["clusterRef"]: cluster["name"]})
target_name.update({cluster["name"]: cluster["clusterRef"]})
target_type.update({cluster["name"]: "group"})
volume_reference = {}
volume_name = {}
lun_name = {}
for volume in response["volume"]:
volume_reference.update({volume["volumeRef"]: volume["name"]})
volume_name.update({volume["name"]: volume["volumeRef"]})
if volume["listOfMappings"]:
lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
for volume in response["highLevelVolBundle"]["thinVolume"]:
volume_reference.update({volume["volumeRef"]: volume["name"]})
volume_name.update({volume["name"]: volume["volumeRef"]})
if volume["listOfMappings"]:
lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
# Build current mapping object
self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"],
map_reference=mapping["mapRef"],
lun_mapping_reference=mapping["lunMappingRef"],
lun=mapping["lun"]
) for mapping in response["storagePoolBundle"]["lunMapping"]],
volume_by_reference=volume_reference,
volume_by_name=volume_name,
lun_by_name=lun_name,
target_by_reference=target_reference,
target_by_name=target_name,
target_type_by_name=target_type)
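# For illustration, mapping_info ends up shaped like this (references shortened and
# hypothetical; the *_by_reference lookups are omitted for brevity):
#     {"lun_mapping": [{"volume_reference": "0200...07", "map_reference": "8400...08",
#                       "lun_mapping_reference": "8800...01", "lun": 1}],
#      "volume_by_name": {"volume1": "0200...07"},
#      "lun_by_name": {"volume1": 1},
#      "target_by_name": {"host1": "8400...08"},
#      "target_type_by_name": {"host1": "host"}}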
def get_lun_mapping(self):
"""Find the matching lun mapping reference.
Returns: tuple(bool, int, int): contains volume match, volume mapping reference and mapping lun
"""
target_match = False
reference = None
lun = None
self.update_mapping_info()
# Verify that, when a lun is specified, it does not match an existing lun value unless it is associated
# with the specified volume (i.e. for an update)
if self.lun and any((self.lun == lun_mapping["lun"] and
self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and
self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]]
) for lun_mapping in self.mapping_info["lun_mapping"]):
self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid)
# Verify that, when target_type is specified, it matches the target's actual type
if self.target and self.target_type and self.target in self.mapping_info["target_type_by_name"].keys() and \
self.mapping_info["target_type_by_name"][self.target] != self.target_type:
self.module.fail_json(
msg="Option target does not match the specified target_type! Id [%s]." % self.ssid)
# Verify volume and target exist if needed for expected state.
if self.state:
if self.volume not in self.mapping_info["volume_by_name"].keys():
self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
if self.target and self.target not in self.mapping_info["target_by_name"].keys():
self.module.fail_json(msg="Target does not exist. Id [%s'." % self.ssid)
for lun_mapping in self.mapping_info["lun_mapping"]:
# Find matching volume reference
if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
reference = lun_mapping["lun_mapping_reference"]
lun = lun_mapping["lun"]
# Determine whether the lun mapping is attached to the specified target with the expected lun value
if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
(self.lun is None or lun == self.lun)):
target_match = True
return target_match, reference, lun
def update(self):
"""Execute the changes the require changes on the storage array."""
target_match, lun_reference, lun = self.get_lun_mapping()
update = (self.state and not target_match) or (not self.state and target_match)
if update and not self.check_mode:
try:
if self.state:
body = dict()
target = None if not self.target else self.mapping_info["target_by_name"][self.target]
if target:
body.update(dict(targetId=target))
if self.lun is not None:
body.update(dict(lun=self.lun))
if lun_reference:
rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s/move"
% (self.ssid, lun_reference), method="POST", data=json.dumps(body),
headers=HEADERS, **self.creds)
else:
body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
rc, response = request(self.url + "storage-systems/%s/volume-mappings" % self.ssid,
method="POST", data=json.dumps(body), headers=HEADERS, **self.creds)
else: # Remove existing lun mapping for volume and target
rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s"
% (self.ssid, lun_reference),
method="DELETE", headers=HEADERS, **self.creds)
except Exception as error:
self.module.fail_json(
msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]"
% (self.ssid, to_native(error)))
self.module.exit_json(msg="Lun mapping is complete.", changed=update)
def main():
lun_mapping = LunMapping()
lun_mapping.update()
if __name__ == '__main__':
main()
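To round this out, creating a mapping as update() does above is a single POST to the
volume-mappings endpoint; a sketch with hypothetical references (the endpoint and body
keys come from the module):

import requests  # assumption: the third-party requests package is installed

api_url = "https://localhost:8443/devmgr/v2/"  # hypothetical Web Services URL
body = {"mappableObjectId": "02000000600A098000A4B28D000010475D430107",  # hypothetical volumeRef
        "targetId": "84000000600A098000A4B28D0030101544233C08",  # hypothetical hostRef
        "lun": 5}
resp = requests.post(api_url + "storage-systems/1/volume-mappings",
                     json=body, auth=("admin", "myPass"), verify=False)
print(resp.json())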

@@ -1,708 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_mgmt_interface
short_description: NetApp E-Series management interface configuration
description:
- Configure the E-Series management interfaces
author:
- Michael Price (@lmprice)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Enable or disable IPv4 network interface configuration.
- Either IPv4 or IPv6 must be enabled; otherwise an error will occur.
- Only required when enabling or disabling the IPv4 network interface.
choices:
- enable
- disable
required: no
aliases:
- enable_interface
controller:
description:
- The controller that owns the port you want to configure.
- Controller names are represented alphabetically, with the first controller as A,
the second as B, and so on.
- Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
limitation and could change in the future.
required: yes
choices:
- A
- B
name:
description:
- The port to modify the configuration for.
- The list of choices is not necessarily comprehensive. It depends on the number of ports
that are present in the system.
- The name represents the port number (typically from left to right on the controller),
beginning with a value of 1.
- Mutually exclusive with I(channel).
aliases:
- port
- iface
channel:
description:
- The port to modify the configuration for.
- The channel represents the port number (typically from left to right on the controller),
beginning with a value of 1.
- Mutually exclusive with I(name).
address:
description:
- The IPv4 address to assign to the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
required: no
subnet_mask:
description:
- The subnet mask to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
required: no
gateway:
description:
- The IPv4 gateway address to utilize for the interface.
- Should be specified in xx.xx.xx.xx form.
- Mutually exclusive with I(config_method=dhcp)
required: no
config_method:
description:
- The configuration method type to use for network interface ports.
- dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
choices:
- dhcp
- static
required: no
dns_config_method:
description:
- The configuration method type to use for DNS services.
- dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup).
choices:
- dhcp
- static
required: no
dns_address:
description:
- Primary IPv4 DNS server address
required: no
dns_address_backup:
description:
- Backup IPv4 DNS server address
- Queried when primary DNS server fails
required: no
ntp_config_method:
description:
- The configuration method type to use for NTP services.
- disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
- dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
choices:
- disable
- dhcp
- static
required: no
ntp_address:
description:
- Primary IPv4 NTP server address
required: no
ntp_address_backup:
description:
- Backup IPv4 NTP server address
- Queried when primary NTP server fails
required: no
ssh:
type: bool
description:
- Enable ssh access to the controller for debug purposes.
- This is a controller-level setting.
- rlogin/telnet will be enabled for ancient equipment where ssh is not available.
required: no
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
via dhcp, etc), can take seconds or minutes longer to take effect.
- "Known issue: Changes specifically to down ports will result in a failure. However, this may not be the case in up
coming NetApp E-Series firmware releases (released after firmware version 11.40.2)."
'''
EXAMPLES = """
- name: Configure the first port on the A controller with a static IPv4 address
netapp_e_mgmt_interface:
name: "1"
controller: "A"
config_method: static
address: "192.168.1.100"
subnet_mask: "255.255.255.0"
gateway: "192.168.1.1"
ssid: "1"
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Disable ipv4 connectivity for the second port on the B controller
netapp_e_mgmt_interface:
name: "2"
controller: "B"
enable_interface: no
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
- name: Enable ssh access for ports one and two on controller A
netapp_e_mgmt_interface:
name: "{{ item }}"
controller: "A"
ssh: yes
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
loop:
- 1
- 2
- name: Configure static DNS settings for the first port on controller A
netapp_e_mgmt_interface:
name: "1"
controller: "A"
dns_config_method: static
dns_address: "192.168.1.100"
dns_address_backup: "192.168.1.1"
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
- name: Configure static NTP settings for ports one and two on controller B
netapp_e_mgmt_interface:
name: "{{ item }}"
controller: "B"
ntp_config_method: static
ntp_address: "129.100.1.100"
ntp_address_backup: "127.100.1.1"
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
loop:
- 1
- 2
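# A hypothetical DHCP variant of the first example (parameter values are illustrative):
- name: Configure the first port on the A controller via DHCP
netapp_e_mgmt_interface:
name: "1"
controller: "A"
config_method: dhcp
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"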
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The interface settings have been updated.
enabled:
description:
- Indicates whether IPv4 connectivity has been enabled or disabled.
- This does not necessarily indicate connectivity. If dhcp was enabled absent a dhcp server, for instance,
it is unlikely that the configuration will actually be valid.
returned: on success
sample: True
type: bool
"""
import json
import logging
from pprint import pformat, pprint
import time
import socket
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class MgmtInterface(object):
MAX_RETRIES = 15
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type="str", choices=["enable", "disable"],
aliases=["enable_interface"], required=False),
controller=dict(type="str", required=True, choices=["A", "B"]),
name=dict(type="str", aliases=["port", "iface"]),
channel=dict(type="int"),
address=dict(type="str", required=False),
subnet_mask=dict(type="str", required=False),
gateway=dict(type="str", required=False),
config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
dns_address=dict(type="str", required=False),
dns_address_backup=dict(type="str", required=False),
ntp_config_method=dict(type="str", required=False, choices=["disable", "dhcp", "static"]),
ntp_address=dict(type="str", required=False),
ntp_address_backup=dict(type="str", required=False),
ssh=dict(type="bool", required=False),
log_path=dict(type="str", required=False),
))
required_if = [
["state", "enable", ["config_method"]],
["config_method", "static", ["address", "subnet_mask"]],
["dns_config_method", "static", ["dns_address"]],
["ntp_config_method", "static", ["ntp_address"]],
]
mutually_exclusive = [
["name", "channel"],
]
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
mutually_exclusive=mutually_exclusive)
args = self.module.params
self.controller = args["controller"]
self.name = args["name"]
self.channel = args["channel"]
self.config_method = args["config_method"]
self.address = args["address"]
self.subnet_mask = args["subnet_mask"]
self.gateway = args["gateway"]
self.enable_interface = None if args["state"] is None else args["state"] == "enable"
self.dns_config_method = args["dns_config_method"]
self.dns_address = args["dns_address"]
self.dns_address_backup = args["dns_address_backup"]
self.ntp_config_method = args["ntp_config_method"]
self.ntp_address = args["ntp_address"]
self.ntp_address_backup = args["ntp_address_backup"]
self.ssh = args["ssh"]
self.ssid = args["ssid"]
self.url = args["api_url"]
self.creds = dict(url_password=args["api_password"],
validate_certs=args["validate_certs"],
url_username=args["api_username"], )
self.retries = 0
self.check_mode = self.module.check_mode
self.post_body = dict()
log_path = args["log_path"]
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
@property
def controllers(self):
"""Retrieve a mapping of controller labels to their references
{
'A': '070000000000000000000001',
'B': '070000000000000000000002',
}
:return: the controllers defined on the system
"""
try:
(rc, controllers) = request(self.url + 'storage-systems/%s/controllers'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
controllers = list()
self.module.fail_json(
msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
controllers.sort(key=lambda c: c['physicalLocation']['slot'])
controllers_dict = dict()
i = ord('A')
for controller in controllers:
label = chr(i)
settings = dict(controllerSlot=controller['physicalLocation']['slot'],
controllerRef=controller['controllerRef'],
ssh=controller['networkSettings']['remoteAccessEnabled'])
controllers_dict[label] = settings
i += 1
return controllers_dict
@property
def interface(self):
net_interfaces = list()
try:
(rc, net_interfaces) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
% self.ssid, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
controllers = self.controllers
controller = controllers[self.controller]
net_interfaces = [iface for iface in net_interfaces if iface["controllerRef"] == controller["controllerRef"]]
# Find the correct interface
iface = None
for net in net_interfaces:
if self.name:
if net["alias"] == self.name or net["interfaceName"] == self.name:
iface = net
break
elif self.channel:
if net["channel"] == self.channel:
iface = net
break
if iface is None:
identifier = self.name if self.name is not None else self.channel
self.module.fail_json(msg="We could not find an interface matching [%s] on Array=[%s]."
% (identifier, self.ssid))
return dict(alias=iface["alias"],
channel=iface["channel"],
link_status=iface["linkStatus"],
enabled=iface["ipv4Enabled"],
address=iface["ipv4Address"],
gateway=iface["ipv4GatewayAddress"],
subnet_mask=iface["ipv4SubnetMask"],
dns_config_method=iface["dnsProperties"]["acquisitionProperties"]["dnsAcquisitionType"],
dns_servers=iface["dnsProperties"]["acquisitionProperties"]["dnsServers"],
ntp_config_method=iface["ntpProperties"]["acquisitionProperties"]["ntpAcquisitionType"],
ntp_servers=iface["ntpProperties"]["acquisitionProperties"]["ntpServers"],
config_method=iface["ipv4AddressConfigMethod"],
controllerRef=iface["controllerRef"],
controllerSlot=iface["controllerSlot"],
ipv6Enabled=iface["ipv6Enabled"],
id=iface["interfaceRef"], )
def get_enable_interface_settings(self, iface, expected_iface, update, body):
"""Enable or disable the IPv4 network interface."""
if self.enable_interface:
if not iface["enabled"]:
update = True
body["ipv4Enabled"] = True
else:
if iface["enabled"]:
update = True
body["ipv4Enabled"] = False
expected_iface["enabled"] = body["ipv4Enabled"]
return update, expected_iface, body
def get_interface_settings(self, iface, expected_iface, update, body):
"""Update network interface settings."""
if self.config_method == "dhcp":
if iface["config_method"] != "configDhcp":
update = True
body["ipv4AddressConfigMethod"] = "configDhcp"
else:
if iface["config_method"] != "configStatic":
update = True
body["ipv4AddressConfigMethod"] = "configStatic"
if iface["address"] != self.address:
update = True
body["ipv4Address"] = self.address
if iface["subnet_mask"] != self.subnet_mask:
update = True
body["ipv4SubnetMask"] = self.subnet_mask
if self.gateway and iface["gateway"] != self.gateway:
update = True
body["ipv4GatewayAddress"] = self.gateway
expected_iface["address"] = body["ipv4Address"]
expected_iface["subnet_mask"] = body["ipv4SubnetMask"]
expected_iface["gateway"] = body["ipv4GatewayAddress"]
expected_iface["config_method"] = body["ipv4AddressConfigMethod"]
return update, expected_iface, body
def get_dns_server_settings(self, iface, expected_iface, update, body):
"""Add DNS server information to the request body."""
if self.dns_config_method == "dhcp":
if iface["dns_config_method"] != "dhcp":
update = True
body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="dhcp")
elif self.dns_config_method == "static":
dns_servers = [dict(addressType="ipv4", ipv4Address=self.dns_address)]
if self.dns_address_backup:
dns_servers.append(dict(addressType="ipv4", ipv4Address=self.dns_address_backup))
body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="stat", dnsServers=dns_servers)
if (iface["dns_config_method"] != "stat" or
len(iface["dns_servers"]) != len(dns_servers) or
(len(iface["dns_servers"]) == 2 and
(iface["dns_servers"][0]["ipv4Address"] != self.dns_address or
iface["dns_servers"][1]["ipv4Address"] != self.dns_address_backup)) or
(len(iface["dns_servers"]) == 1 and
iface["dns_servers"][0]["ipv4Address"] != self.dns_address)):
update = True
expected_iface["dns_servers"] = dns_servers
expected_iface["dns_config_method"] = body["dnsAcquisitionDescriptor"]["dnsAcquisitionType"]
return update, expected_iface, body
def get_ntp_server_settings(self, iface, expected_iface, update, body):
"""Add NTP server information to the request body."""
if self.ntp_config_method == "disable":
if iface["ntp_config_method"] != "disabled":
update = True
body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="disabled")
elif self.ntp_config_method == "dhcp":
if iface["ntp_config_method"] != "dhcp":
update = True
body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="dhcp")
elif self.ntp_config_method == "static":
ntp_servers = [dict(addrType="ipvx", ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address))]
if self.ntp_address_backup:
ntp_servers.append(dict(addrType="ipvx",
ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address_backup)))
body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="stat", ntpServers=ntp_servers)
if (iface["ntp_config_method"] != "stat" or
len(iface["ntp_servers"]) != len(ntp_servers) or
((len(iface["ntp_servers"]) == 2 and
(iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address or
iface["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup)) or
(len(iface["ntp_servers"]) == 1 and
iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address))):
update = True
expected_iface["ntp_servers"] = ntp_servers
expected_iface["ntp_config_method"] = body["ntpAcquisitionDescriptor"]["ntpAcquisitionType"]
return update, expected_iface, body
def get_remote_ssh_settings(self, settings, update, body):
"""Configure network interface ports for remote ssh access."""
if self.ssh != settings["ssh"]:
update = True
body["enableRemoteAccess"] = self.ssh
return update, body
def update_array(self, settings, iface):
"""Update controller with new interface, dns service, ntp service and/or remote ssh access information.
:returns: whether information passed will modify the controller's current state
:rtype: bool
"""
update = False
body = dict(controllerRef=settings['controllerRef'],
interfaceRef=iface['id'])
expected_iface = iface.copy()
# Check whether the api url is using the affected management interface to change itself
update_used_matching_address = False
if self.enable_interface and self.config_method:
netloc = list(urlparse.urlparse(self.url))[1]
address = netloc.split(":")[0]
address_info = socket.getaddrinfo(address, 8443)
url_address_info = socket.getaddrinfo(iface["address"], 8443)
update_used_matching_address = any(info in url_address_info for info in address_info)
self._logger.info("update_used_matching_address: %s", update_used_matching_address)
# Populate the body of the request and check for changes
if self.enable_interface is not None:
update, expected_iface, body = self.get_enable_interface_settings(iface, expected_iface, update, body)
if self.config_method is not None:
update, expected_iface, body = self.get_interface_settings(iface, expected_iface, update, body)
if self.dns_config_method is not None:
update, expected_iface, body = self.get_dns_server_settings(iface, expected_iface, update, body)
if self.ntp_config_method is not None:
update, expected_iface, body = self.get_ntp_server_settings(iface, expected_iface, update, body)
if self.ssh is not None:
update, body = self.get_remote_ssh_settings(settings, update, body)
iface["ssh"] = self.ssh
expected_iface["ssh"] = self.ssh
# debug information
self._logger.info(pformat(body))
self._logger.info(pformat(iface))
self._logger.info(pformat(expected_iface))
if self.check_mode:
return update
if update and not self.check_mode:
if not update_used_matching_address:
try:
(rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
% self.ssid, method='POST', data=json.dumps(body), headers=HEADERS,
timeout=300, ignore_errors=True, **self.creds)
if rc == 422:
if data['retcode'] == "4" or data['retcode'] == "illegalParam":
if not (body.get('ipv4Enabled') or iface['ipv6Enabled']):
self.module.fail_json(msg="This storage-system already has IPv6 connectivity disabled. "
"DHCP configuration for IPv4 is required at a minimum."
" Array Id [%s] Message [%s]."
% (self.ssid, data['errorMessage']))
else:
self.module.fail_json(msg="We failed to configure the management interface. Array Id "
"[%s] Message [%s]." % (self.ssid, data))
elif rc >= 300:
self.module.fail_json(
msg="We failed to configure the management interface. Array Id [%s] Message [%s]." %
(self.ssid, data))
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
else:
self.update_api_address_interface_match(body)
return self.validate_changes(expected_iface) if update and iface["link_status"] != "up" else update
def update_api_address_interface_match(self, body):
"""Change network interface address which matches the api_address"""
try:
try:
(rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
use_proxy=False, force=True, ignore_errors=True, method='POST',
data=json.dumps(body), headers=HEADERS, timeout=10, **self.creds)
except Exception:
url_parts = list(urlparse.urlparse(self.url))
domain = url_parts[1].split(":")
domain[0] = self.address
url_parts[1] = ":".join(domain)
expected_url = urlparse.urlunparse(url_parts)
self._logger.info(pformat(expected_url))
(rc, data) = request(expected_url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
headers=HEADERS, timeout=300, **self.creds)
return
except Exception as err:
self._logger.info(type(err))
self.module.fail_json(
msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def validate_changes(self, expected_iface, retry=6):
"""Validate interface changes were applied to the controller interface port. 30 second timeout"""
if self.interface != expected_iface:
time.sleep(5)
if retry:
return self.validate_changes(expected_iface, retry - 1)
self.module.fail_json(msg="Update failure: we failed to verify the necessary state change.")
return True
def check_health(self):
"""It's possible, due to a previous operation, for the API to report a 424 (offline) status for the
storage-system. Therefore, we run a manual check with retries to attempt to contact the system before we
continue.
"""
try:
(rc, data) = request(self.url + 'storage-systems/%s/controllers'
% self.ssid, headers=HEADERS,
ignore_errors=True, **self.creds)
# We've probably recently changed the interface settings and it's still coming back up: retry.
if rc == 424:
if self.retries < self.MAX_RETRIES:
self.retries += 1
self._logger.info("We hit a 424, retrying in 5s.")
time.sleep(5)
self.check_health()
else:
self.module.fail_json(
msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
(self.ssid, data))
elif rc >= 300:
self.module.fail_json(
msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
(self.ssid, data))
# This is going to catch cases like a connection failure
except Exception as err:
if self.retries < self.MAX_RETRIES:
self._logger.info("We hit a connection failure, retrying in 5s.")
self.retries += 1
time.sleep(5)
self.check_health()
else:
self.module.fail_json(
msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update(self):
"""Update storage system with necessary changes."""
# Check if the storage array can be contacted
self.check_health()
# make the necessary changes to the storage system
settings = self.controllers[self.controller]
iface = self.interface
self._logger.info(pformat(settings))
self._logger.info(pformat(iface))
update = self.update_array(settings, iface)
self.module.exit_json(msg="The interface settings have been updated.", changed=update)
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = MgmtInterface()
iface()
if __name__ == '__main__':
main()

View file

@ -1,369 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_snapshot_group
short_description: NetApp E-Series manage snapshot groups
description:
- Create, update, delete snapshot groups for NetApp E-series storage arrays
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
state:
description:
- Whether to ensure the group is present or absent.
required: True
choices:
- present
- absent
name:
description:
- The name to give the snapshot group
required: True
base_volume_name:
description:
- The name of the base volume or thin volume to use as the base for the new snapshot group.
- If a snapshot group with an identical C(name) already exists but with a different base volume
an error will be returned.
required: True
repo_pct:
description:
- The size of the repository in relation to the size of the base volume
required: False
default: 20
warning_threshold:
description:
- The repository utilization warning threshold, as a percentage of the repository volume capacity.
required: False
default: 80
delete_limit:
description:
- The automatic deletion indicator.
- If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of
snapshot images limited to the number specified.
- This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
required: False
default: 30
full_policy:
description:
- The behavior on when the data repository becomes full.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
required: False
default: purgepit
choices:
- purgepit
- unknown
- failbasewrites
- __UNDEFINED
storage_pool_name:
required: True
description:
- The name of the storage pool on which to allocate the repository volume.
rollback_priority:
required: False
description:
- The importance of the rollback operation.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
choices:
- highest
- high
- medium
- low
- lowest
- __UNDEFINED
default: medium
'''
EXAMPLES = """
- name: Configure Snapshot group
netapp_e_snapshot_group:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
base_volume_name: SSGroup_test
name: OOSS_Group
state: present
repo_pct: 20
warning_threshold: 85
delete_limit: 30
full_policy: purgepit
storage_pool_name: Disk_Pool_1
rollback_priority: medium
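# A hypothetical removal task (values are illustrative):
- name: Remove snapshot group
netapp_e_snapshot_group:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: absent
name: OOSS_Group
base_volume_name: SSGroup_test
storage_pool_name: Disk_Pool_1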
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: json facts for newly created snapshot group.
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
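# Thin wrapper around open_url: the response body is parsed as JSON even for HTTP errors so
# callers can inspect the error payload; status codes >= 400 raise unless ignore_errors is set.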
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
class SnapshotGroup(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
state=dict(required=True, choices=['present', 'absent']),
base_volume_name=dict(required=True),
name=dict(required=True),
repo_pct=dict(default=20, type='int'),
warning_threshold=dict(default=80, type='int'),
delete_limit=dict(default=30, type='int'),
full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
storage_pool_name=dict(type='str'),
ssid=dict(required=True),
)
self.module = AnsibleModule(argument_spec=argument_spec)
self.post_data = dict()
self.warning_threshold = self.module.params['warning_threshold']
self.base_volume_name = self.module.params['base_volume_name']
self.name = self.module.params['name']
self.repo_pct = self.module.params['repo_pct']
self.delete_limit = self.module.params['delete_limit']
self.full_policy = self.module.params['full_policy']
self.rollback_priority = self.module.params['rollback_priority']
self.storage_pool_name = self.module.params['storage_pool_name']
self.state = self.module.params['state']
self.url = self.module.params['api_url']
self.user = self.module.params['api_username']
self.pwd = self.module.params['api_password']
self.certs = self.module.params['validate_certs']
self.ssid = self.module.params['ssid']
if not self.url.endswith('/'):
self.url += '/'
self.changed = False
@property
def pool_id(self):
pools = 'storage-systems/%s/storage-pools' % self.ssid
url = self.url + pools
try:
(rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
except Exception as err:
self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
for pool in data:
if pool['name'] == self.storage_pool_name:
self.pool_data = pool
return pool['id']
self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
@property
def volume_id(self):
volumes = 'storage-systems/%s/volumes' % self.ssid
url = self.url + volumes
try:
rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
qty = 0
for volume in data:
if volume['name'] == self.base_volume_name:
qty += 1
if qty > 1:
self.module.fail_json(msg="More than one volume with the name: %s was found, "
"please ensure your volume has a unique name" % self.base_volume_name)
else:
Id = volume['id']
self.volume = volume
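# If no volume matched, Id was never bound and the NameError below is turned into a clean module failure.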
try:
return Id
except NameError:
self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
@property
def snapshot_group_id(self):
url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
try:
rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to fetch snapshot groups. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
for ssg in data:
if ssg['name'] == self.name:
self.ssg_data = ssg
return ssg['id']
return None
@property
def ssg_needs_update(self):
if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
self.ssg_data['repFullPolicy'] != self.full_policy or \
self.ssg_data['rollbackPriority'] != self.rollback_priority:
return True
else:
return False
def create_snapshot_group(self):
self.post_data = dict(
baseMappableObjectId=self.volume_id,
name=self.name,
repositoryPercentage=self.repo_pct,
warningThreshold=self.warning_threshold,
autoDeleteLimit=self.delete_limit,
fullPolicy=self.full_policy,
storagePoolId=self.pool_id,
)
snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
url = self.url + snapshot
try:
rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to create snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
# snapshot_group_id is a read-only property and cannot be assigned; fail cleanly if the group cannot be found after creation
if self.snapshot_group_id is None:
self.module.fail_json(msg="Failed to find snapshot group after creation. "
"Snapshot group [%s]. Id [%s]." % (self.name, self.ssid))
if self.ssg_needs_update:
self.update_ssg()
else:
self.module.exit_json(changed=True, **self.ssg_data)
def update_ssg(self):
self.post_data = dict(
warningThreshold=self.warning_threshold,
autoDeleteLimit=self.delete_limit,
fullPolicy=self.full_policy,
rollbackPriority=self.rollback_priority
)
url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
try:
rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to update snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
def apply(self):
if self.state == 'absent':
if self.snapshot_group_id:
try:
rc, resp = request(
self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to delete snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
else:
self.module.exit_json(changed=False, msg="Snapshot group absent")
elif self.snapshot_group_id:
if self.ssg_needs_update:
self.update_ssg()
self.module.exit_json(changed=True, **self.ssg_data)
else:
self.module.exit_json(changed=False, **self.ssg_data)
else:
self.create_snapshot_group()
def main():
vg = SnapshotGroup()
vg.apply()
if __name__ == '__main__':
main()

View file

@ -1,246 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_snapshot_images
short_description: NetApp E-Series create and delete snapshot images
description:
- Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays.
- Only the oldest snapshot image can be deleted, so consistency is preserved.
- "Related: Snapshot volumes are created from snapshot images."
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
snapshot_group:
description:
- The name of the snapshot group in which you want to create a snapshot image.
required: True
state:
description:
- Whether a new snapshot image should be created or oldest be deleted.
required: True
choices: ['create', 'remove']
'''
EXAMPLES = """
- name: Create Snapshot
netapp_e_snapshot_images:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ validate_certs }}"
snapshot_group: "3300000060080E5000299C24000005B656D9F394"
state: 'create'
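# A hypothetical removal task deleting the oldest image (values are illustrative):
- name: Remove oldest snapshot image
netapp_e_snapshot_images:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ validate_certs }}"
snapshot_group: "3300000060080E5000299C24000005B656D9F394"
state: 'remove'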
"""
RETURN = """
---
msg:
description: State of operation
type: str
returned: always
sample: "Created snapshot image"
image_id:
description: ID of snapshot image
type: str
returned: state == created
sample: "3400000060080E5000299B640063074057BC5C5E "
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
snap_groups_url = api_url + snap_groups
(ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
snapshot_group_id = None
for snapshot_group in snapshot_groups:
if name == snapshot_group['label']:
snapshot_group_id = snapshot_group['pitGroupRef']
break
if snapshot_group_id is None:
module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return snapshot_group
def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
get_status = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + get_status
try:
(ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
except Exception as err:
module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
(name, ssid, to_native(err)))
if not images:
module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))
oldest = min(images, key=lambda x: x['pitSequenceNumber'])
if oldest is None or "pitRef" not in oldest:
module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return oldest
def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
snapshot_group_id = snapshot_group_obj['pitGroupRef']
endpoint = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + endpoint
post_data = json.dumps({'groupId': snapshot_group_id})
image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
if image_data[1]['status'] == 'optimal':
status = True
id = image_data[1]['id']
else:
status = False
id = ''
return status, id
def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
image_id = image['pitRef']
endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
url = api_url + endpoint
try:
(ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
except Exception as e:
module.fail_json(msg="Failed to delete snapshot image. Group [%s]. Id [%s]. Error [%s]" %
(snapshot_group, ssid, to_native(e)))
if ret == 204:
deleted_status = True
error_message = ''
else:
deleted_status = False
error_message = image_data.get('errorMessage', '') if isinstance(image_data, dict) else str(image_data)
return deleted_status, error_message
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
snapshot_group=dict(required=True, type='str'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, type='bool', default=True),
state=dict(required=True, choices=['create', 'remove'], type='str'),
))
module = AnsibleModule(argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
snapshot_group = p.pop('snapshot_group')
desired_state = p.pop('state')
if not api_url.endswith('/'):
api_url += '/'
if desired_state == 'create':
created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
if created_status:
module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
else:
deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
if deleted:
module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s --- %s" % (
ssid, snapshot_group, error_msg))
if __name__ == '__main__':
main()

View file

@ -1,280 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_snapshot_volume
short_description: NetApp E-Series manage snapshot volumes.
description:
- Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
author: Kevin Hulquest (@hulquest)
notes:
- Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status
will be returned, no other changes can be made to a pre-existing snapshot volume.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- storage array ID
required: True
snapshot_image_id:
required: True
description:
- The identifier of the snapshot image used to create the new snapshot volume.
- "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
full_threshold:
description:
- The repository utilization warning threshold percentage
default: 85
name:
required: True
description:
- The name you wish to give the snapshot volume
view_mode:
required: True
description:
- The snapshot volume access mode
choices:
- modeUnknown
- readWrite
- readOnly
- __UNDEFINED
repo_percentage:
description:
- The size of the view in relation to the size of the base volume
default: 20
storage_pool_name:
description:
- Name of the storage pool on which to allocate the repository volume.
required: True
state:
description:
- Whether to create or remove the snapshot volume
required: True
choices:
- absent
- present
'''
EXAMPLES = """
- name: Snapshot volume
netapp_e_snapshot_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}/"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
state: present
storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
snapshot_image_id: "{{ snapshot_volume_image_id }}"
name: "{{ snapshot_volume_name }}"
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: Json facts for the volume that was created.
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
class SnapshotVolume(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
ssid=dict(type='str', required=True),
snapshot_image_id=dict(type='str', required=True),
full_threshold=dict(type='int', default=85),
name=dict(type='str', required=True),
view_mode=dict(type='str', default='readOnly',
choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']),
repo_percentage=dict(type='int', default=20),
storage_pool_name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present'])
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.state = args['state']
self.ssid = args['ssid']
self.snapshot_image_id = args['snapshot_image_id']
self.full_threshold = args['full_threshold']
self.name = args['name']
self.view_mode = args['view_mode']
self.repo_percentage = args['repo_percentage']
self.storage_pool_name = args['storage_pool_name']
self.url = args['api_url']
self.user = args['api_username']
self.pwd = args['api_password']
self.certs = args['validate_certs']
if not self.url.endswith('/'):
self.url += '/'
@property
def pool_id(self):
pools = 'storage-systems/%s/storage-pools' % self.ssid
url = self.url + pools
(rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
for pool in data:
if pool['name'] == self.storage_pool_name:
self.pool_data = pool
return pool['id']
self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
@property
def ss_vol_exists(self):
rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
if ss_vols:
for ss_vol in ss_vols:
if ss_vol['name'] == self.name:
self.ss_vol = ss_vol
return True
return False
@property
def ss_vol_needs_update(self):
if self.ss_vol['fullWarnThreshold'] != self.full_threshold:
return True
else:
return False
def create_ss_vol(self):
post_data = dict(
snapshotImageId=self.snapshot_image_id,
fullThreshold=self.full_threshold,
name=self.name,
viewMode=self.view_mode,
repositoryPercentage=self.repo_percentage,
repositoryPoolId=self.pool_id
)
rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
url_password=self.pwd, validate_certs=self.certs, method='POST')
self.ss_vol = create_resp
# Doing a check after creation because the creation call fails to set the specified warning threshold
if self.ss_vol_needs_update:
self.update_ss_vol()
else:
self.module.exit_json(changed=True, **create_resp)
def update_ss_vol(self):
post_data = dict(
fullThreshold=self.full_threshold,
)
rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
method='POST', validate_certs=self.certs)
self.module.exit_json(changed=True, **resp)
def remove_ss_vol(self):
rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
method='DELETE')
self.module.exit_json(changed=True, msg="Volume successfully deleted")
def apply(self):
if self.state == 'present':
if self.ss_vol_exists:
if self.ss_vol_needs_update:
self.update_ss_vol()
else:
self.module.exit_json(changed=False, **self.ss_vol)
else:
self.create_ss_vol()
else:
if self.ss_vol_exists:
self.remove_ss_vol()
else:
self.module.exit_json(changed=False, msg="Volume already absent")
def main():
sv = SnapshotVolume()
sv.apply()
if __name__ == '__main__':
main()

View file

@ -1,295 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_storage_system
short_description: NetApp E-Series Web Services Proxy manage storage arrays
description:
- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
options:
api_username:
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_password:
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_url:
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
required: true
validate_certs:
description:
- Should https certificates be validated?
type: bool
default: 'yes'
ssid:
description:
- The ID of the array to manage. This value must be unique for each array.
required: true
state:
description:
- Whether the specified array should be configured on the Web Services Proxy or not.
required: true
choices: ['present', 'absent']
controller_addresses:
description:
- The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the I(array_wwn) parameter.
required: true
array_wwn:
description:
- The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive with the
I(controller_addresses) parameter.
array_password:
description:
- The management password of the array to manage, if set.
enable_trace:
description:
- Enable trace logging for SYMbol calls to the storage system.
type: bool
default: 'no'
meta_tags:
description:
- Optional meta tags to associate to this storage system
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
---
- name: Presence of storage system
netapp_e_storage_system:
ssid: "{{ item.key }}"
state: present
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
controller_addresses:
- "{{ item.value.address1 }}"
- "{{ item.value.address2 }}"
with_dict: "{{ storage_systems }}"
when: check_storage_system
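# A hypothetical removal task (values are illustrative):
- name: Absence of storage system
netapp_e_storage_system:
ssid: "{{ item.key }}"
state: absent
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
with_dict: "{{ storage_systems }}"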
'''
RETURN = '''
msg:
description: State of request
type: str
returned: always
sample: 'Storage system removed.'
'''
import json
from datetime import datetime as dt, timedelta
from time import sleep
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
(rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
method='POST', url_username=api_usr, url_password=api_pwd,
validate_certs=validate_certs)
status = None
return_resp = resp
if 'status' in resp:
status = resp['status']
if rc == 201:
status = 'neverContacted'
fail_after_time = dt.utcnow() + timedelta(seconds=timeout)
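# Poll the proxy until the newly added array reports a status other than neverContacted, or fail once the timeout elapses.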
while status == 'neverContacted':
if dt.utcnow() > fail_after_time:
raise Exception("web proxy timed out waiting for array status")
sleep(1)
(rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
headers=dict(Accept="application/json"), url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
status = system_resp['status']
return_resp = system_resp
return status, return_resp
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
ssid=dict(required=True, type='str'),
controller_addresses=dict(type='list'),
array_wwn=dict(required=False, type='str'),
array_password=dict(required=False, type='str', no_log=True),
array_status_timeout_sec=dict(default=60, type='int'),
enable_trace=dict(default=False, type='bool'),
meta_tags=dict(type='list')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['controller_addresses', 'array_wwn']],
required_if=[('state', 'present', ['controller_addresses'])]
)
p = module.params
state = p['state']
ssid = p['ssid']
controller_addresses = p['controller_addresses']
array_wwn = p['array_wwn']
array_password = p['array_password']
array_status_timeout_sec = p['array_status_timeout_sec']
validate_certs = p['validate_certs']
meta_tags = p['meta_tags']
enable_trace = p['enable_trace']
api_usr = p['api_username']
api_pwd = p['api_password']
api_url = p['api_url']
changed = False
array_exists = False
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
except Exception as err:
module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err)))
array_exists = True
array_detail = resp
if rc == 200:
if state == 'absent':
changed = True
array_exists = False
elif state == 'present':
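# Compare the requested controller addresses against the non-empty addresses the proxy currently reports for the array.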
current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
if set(controller_addresses) != current_addresses:
changed = True
if array_detail['wwn'] != array_wwn and array_wwn is not None:
module.fail_json(
msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' %
(ssid, array_detail['wwn'])
)
elif rc == 404:
if state == 'present':
changed = True
array_exists = False
else:
changed = False
module.exit_json(changed=changed, msg="Storage system was not present.")
if changed and not module.check_mode:
if state == 'present':
if not array_exists:
# add the array
array_add_req = dict(
id=ssid,
controllerAddresses=controller_addresses,
metaTags=meta_tags,
enableTrace=enable_trace
)
if array_wwn:
array_add_req['wwn'] = array_wwn
if array_password:
array_add_req['password'] = array_password
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
request_data = json.dumps(array_add_req)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
array_status_timeout_sec)
except Exception as err:
module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, request_data, to_native(err)))
else: # array exists, modify...
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
post_body = dict(
controllerAddresses=controller_addresses,
removeAllTags=True,
enableTrace=enable_trace,
metaTags=meta_tags
)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
array_status_timeout_sec)
except Exception as err:
module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, post_body, to_native(err)))
elif state == 'absent':
# delete the array
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs)
except Exception as err:
module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err)))
if rc == 422:
module.exit_json(changed=changed, msg="Storage system was not presented.")
if rc == 204:
module.exit_json(changed=changed, msg="Storage system removed.")
module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
main()

View file

@ -1,935 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"}
DOCUMENTATION = '''
---
module: netapp_e_storagepool
short_description: NetApp E-Series manage volume groups and disk pools
description: Create or remove volume groups and disk pools for NetApp E-series storage arrays.
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Whether the specified storage pool should exist or not.
- Note that removing a storage pool currently requires the removal of all defined volumes first.
required: true
choices: ["present", "absent"]
name:
description:
- The name of the storage pool to manage
required: true
criteria_drive_count:
description:
- The number of disks to use for building the storage pool.
- When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
- The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below)
required: false
type: int
criteria_min_usable_capacity:
description:
- The minimum size of the storage pool (in size_unit).
- When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
- The pool will be expanded if this value exceeds its current size. (See expansion note below)
required: false
type: float
criteria_drive_type:
description:
- The type of disk (hdd or ssd) to use when searching for candidates to use.
- When not specified each drive type will be evaluated until successful drive candidates are found starting with
the most prevalent drive type.
required: false
choices: ["hdd","ssd"]
criteria_size_unit:
description:
- The unit used to interpret size parameters
choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
default: "gb"
criteria_drive_min_size:
description:
- The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
criteria_drive_interface_type:
description:
- The interface type to use when selecting drives for the storage pool
- If not provided then all interface types will be considered.
choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"]
required: false
criteria_drive_require_da:
description:
- Ensures the storage pool will be created with only data assurance (DA) capable drives.
- Only available for new storage pools; existing storage pools cannot be converted.
default: false
type: bool
criteria_drive_require_fde:
description:
- Whether full disk encryption ability is required for drives to be added to the storage pool
default: false
type: bool
raid_level:
description:
- The RAID level of the storage pool to be created.
- Required only when I(state=="present").
- When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required
depending on the storage array specifications.
- When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
- When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required.
- When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
- When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
- When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
- Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
required: false
choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
default: "raidDiskPool"
secure_pool:
description:
- Enables the security-at-rest feature on the storage pool.
- Will only work if all drives in the pool are security capable (FDE, FIPS, or a mix).
- Warning: once security is enabled, it is impossible to disable it without erasing the drives.
required: false
type: bool
reserve_drive_count:
description:
- Set the number of drives reserved by the storage pool for reconstruction operations.
- Only valid on raid disk pools.
required: false
remove_volumes:
description:
- Prior to removing a storage pool, delete all volumes in the pool.
default: true
erase_secured_drives:
description:
- If I(state=="absent") then all storage pool drives will be erase
- If I(state=="present") then delete all available storage array drives that have security enabled.
default: true
type: bool
notes:
- The expansion operations are non-blocking due to the time-consuming nature of expanding volume groups.
- Expansion of traditional volume groups (raid0, raid1, raid5, raid6) is performed in steps dictated by the storage
array. Each required step will be attempted until a request fails, which is most likely due to the time the
expansion requires.
- raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
- Tray loss protection and drawer loss protection will be chosen if at all possible.
'''
EXAMPLES = """
- name: No disk groups
netapp_e_storagepool:
ssid: "{{ ssid }}"
name: "{{ item }}"
state: absent
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: Json facts for the pool that was created.
"""
import functools
from itertools import groupby
from time import sleep
from pprint import pformat
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
from ansible.module_utils._text import to_native
def get_most_common_elements(iterator):
"""Returns a generator containing a descending list of most common elements."""
if not isinstance(iterator, list):
raise TypeError("iterator must be a list.")
grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))]
return sorted(grouped, key=lambda x: x[1], reverse=True)
def memoize(func):
"""Generic memoizer for any function with any number of arguments, including zero."""
# Share a single cache across calls so repeated invocations actually hit the cache.
cache = {}
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = str((args, kwargs)) if args or kwargs else "no_argument_response"
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
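As a quick aside, a minimal sketch of how the memoizer above behaves once the cache is shared across calls (expensive_lookup is a hypothetical stand-in for the REST-backed properties below, not part of the module):

calls = {"count": 0}

@memoize
def expensive_lookup():
    calls["count"] += 1
    return "drive-list"

expensive_lookup()  # invokes the wrapped function once
expensive_lookup()  # served from the cache; calls["count"] is still 1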
class NetAppESeriesStoragePool(NetAppESeriesModule):
EXPANSION_TIMEOUT_SEC = 10
DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
def __init__(self):
version = "02.00.0000.0000"
ansible_options = dict(
state=dict(required=True, choices=["present", "absent"], type="str"),
name=dict(required=True, type="str"),
criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
default="gb", type="str"),
criteria_drive_count=dict(type="int"),
criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"],
type="str"),
criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False),
criteria_drive_min_size=dict(type="float"),
criteria_drive_require_da=dict(type="bool", required=False),
criteria_drive_require_fde=dict(type="bool", required=False),
criteria_min_usable_capacity=dict(type="float"),
raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"],
default="raidDiskPool"),
erase_secured_drives=dict(type="bool", default=True),
secure_pool=dict(type="bool", default=False),
reserve_drive_count=dict(type="int"),
remove_volumes=dict(type="bool", default=True))
required_if = [["state", "present", ["raid_level"]]]
super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options,
web_services_version=version,
supports_check_mode=True,
required_if=required_if)
args = self.module.params
self.state = args["state"]
self.ssid = args["ssid"]
self.name = args["name"]
self.criteria_drive_count = args["criteria_drive_count"]
self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"]
self.criteria_size_unit = args["criteria_size_unit"]
self.criteria_drive_min_size = args["criteria_drive_min_size"]
self.criteria_drive_type = args["criteria_drive_type"]
self.criteria_drive_interface_type = args["criteria_drive_interface_type"]
self.criteria_drive_require_fde = args["criteria_drive_require_fde"]
self.criteria_drive_require_da = args["criteria_drive_require_da"]
self.raid_level = args["raid_level"]
self.erase_secured_drives = args["erase_secured_drives"]
self.secure_pool = args["secure_pool"]
self.reserve_drive_count = args["reserve_drive_count"]
self.remove_volumes = args["remove_volumes"]
self.pool_detail = None
# Change all sizes to be measured in bytes
if self.criteria_min_usable_capacity:
self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity *
self.SIZE_UNIT_MAP[self.criteria_size_unit])
if self.criteria_drive_min_size:
self.criteria_drive_min_size = int(self.criteria_drive_min_size *
self.SIZE_UNIT_MAP[self.criteria_size_unit])
self.criteria_size_unit = "bytes"
# Adjust unused raid level option to reflect documentation
if self.raid_level == "raidAll":
self.raid_level = "raidDiskPool"
if self.raid_level == "raid3":
self.raid_level = "raid5"
@property
@memoize
def available_drives(self):
"""Determine the list of available drives"""
return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"]
@property
@memoize
def available_drive_types(self):
"""Determine the types of available drives sorted by the most common first."""
types = [drive["driveMediaType"] for drive in self.drives]
return [entry[0] for entry in get_most_common_elements(types)]
@property
@memoize
def available_drive_interface_types(self):
"""Determine the types of available drives."""
interfaces = [drive["phyDriveType"] for drive in self.drives]
return [entry[0] for entry in get_most_common_elements(interfaces)]
@property
def storage_pool_drives(self, exclude_hotspares=True):
"""Retrieve list of drives found in storage pool."""
if exclude_hotspares:
return [drive for drive in self.drives
if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]]
return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"]]
@property
def expandable_drive_count(self):
"""Maximum number of drives that a storage pool can be expanded at a given time."""
capabilities = None
if self.raid_level == "raidDiskPool":
return len(self.available_drives)
try:
rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return capabilities["featureParameters"]["maxDCEDrives"]
@property
def disk_pool_drive_minimum(self):
"""Provide the storage array's minimum disk pool drive count."""
rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True)
# Standard minimum is 11 drives, but some arrays allow 10. 11 is used as the default.
if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
def get_available_drive_capacities(self, drive_id_list=None):
"""Determine the list of available drive capacities."""
if drive_id_list:
available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
if drive["id"] in drive_id_list and drive["available"] and
drive["status"] == "optimal"])
else:
available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
if drive["available"] and drive["status"] == "optimal"])
self.module.log("available drive capacities: %s" % available_drive_capacities)
return list(available_drive_capacities)
@property
def drives(self):
"""Retrieve list of drives found in storage pool."""
drives = None
try:
rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
return drives
def is_drive_count_valid(self, drive_count):
"""Validate drive count criteria is met."""
if self.criteria_drive_count and drive_count < self.criteria_drive_count:
return False
if self.raid_level == "raidDiskPool":
return drive_count >= self.disk_pool_drive_minimum
if self.raid_level == "raid0":
return drive_count > 0
if self.raid_level == "raid1":
return drive_count >= 2 and (drive_count % 2) == 0
if self.raid_level in ["raid3", "raid5"]:
return 3 <= drive_count <= 30
if self.raid_level == "raid6":
return 5 <= drive_count <= 30
return False
@property
def storage_pool(self):
"""Retrieve storage pool information."""
storage_pools_resp = None
try:
rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
% (self.ssid, to_native(err), self.state))
pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
return pool_detail[0] if pool_detail else dict()
@property
def storage_pool_volumes(self):
"""Retrieve list of volumes associated with storage pool."""
volumes_resp = None
try:
rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
% (self.ssid, to_native(err), self.state))
group_ref = self.storage_pool["volumeGroupRef"]
storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref]
return storage_pool_volume_list
def get_ddp_capacity(self, expansion_drive_list):
"""Return the total usable capacity based on the additional drives."""
def get_ddp_error_percent(_drive_count, _extent_count):
"""Determine the space reserved for reconstruction"""
if _drive_count <= 36:
if _extent_count <= 600:
return 0.40
elif _extent_count <= 1400:
return 0.35
elif _extent_count <= 6200:
return 0.20
elif _extent_count <= 50000:
return 0.15
elif _drive_count <= 64:
if _extent_count <= 600:
return 0.20
elif _extent_count <= 1400:
return 0.15
elif _extent_count <= 6200:
return 0.10
elif _extent_count <= 50000:
return 0.05
elif _drive_count <= 480:
if _extent_count <= 600:
return 0.20
elif _extent_count <= 1400:
return 0.15
elif _extent_count <= 6200:
return 0.10
elif _extent_count <= 50000:
return 0.05
self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid)
def get_ddp_reserved_drive_count(_disk_count):
"""Determine the number of reserved drive."""
reserve_count = 0
if self.reserve_drive_count:
reserve_count = self.reserve_drive_count
elif _disk_count >= 256:
reserve_count = 8
elif _disk_count >= 192:
reserve_count = 7
elif _disk_count >= 128:
reserve_count = 6
elif _disk_count >= 64:
reserve_count = 4
elif _disk_count >= 32:
reserve_count = 3
elif _disk_count >= 12:
reserve_count = 2
elif _disk_count == 11:
reserve_count = 1
return reserve_count
if self.pool_detail:
drive_count = len(self.storage_pool_drives) + len(expansion_drive_list)
else:
drive_count = len(expansion_drive_list)
drive_usable_capacity = min(min(self.get_available_drive_capacities()),
min(self.get_available_drive_capacities(expansion_drive_list)))
drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912)
maximum_stripe_count = (drive_count * drive_data_extents) / 10
error_percent = get_ddp_error_percent(drive_count, drive_data_extents)
error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10
total_stripe_count = maximum_stripe_count - error_overhead
stripe_count_per_drive = total_stripe_count / drive_count
reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive
available_stripe_count = total_stripe_count - reserved_stripe_count
return available_stripe_count * 4294967296
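To make the arithmetic above concrete, here is a standalone sketch that reruns the same formulas for a hypothetical pool of 12 drives with 4 TB usable capacity each; the 0.15 error percent and the reserve count of 2 follow from the tables above (all inputs are illustrative assumptions):

drive_count = 12
drive_usable_capacity = 4 * 10 ** 12  # bytes per drive (hypothetical)
drive_data_extents = (drive_usable_capacity - 8053063680) / 536870912
maximum_stripe_count = drive_count * drive_data_extents / 10
error_overhead = (drive_count * drive_data_extents / 10 * 0.15 + 10) / 10
total_stripe_count = maximum_stripe_count - error_overhead
stripe_count_per_drive = total_stripe_count / drive_count
available_stripe_count = total_stripe_count - 2 * stripe_count_per_drive
print(available_stripe_count * 4294967296 / 1024 ** 4)  # roughly 28.6 usable TiB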
@memoize
def get_candidate_drives(self):
"""Retrieve set of drives candidates for creating a new storage pool."""
def get_candidate_drive_request():
"""Perform request for new volume creation."""
candidates_list = list()
drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types
interface_types = [self.criteria_drive_interface_type] \
if self.criteria_drive_interface_type else self.available_drive_interface_types
for interface_type in interface_types:
for drive_type in drive_types:
candidates = None
volume_candidate_request_data = dict(
type="diskPool" if self.raid_level == "raidDiskPool" else "traditional",
diskPoolVolumeCandidateRequestData=dict(
reconstructionReservedDriveCount=65535))
candidate_selection_type = dict(
candidateSelectionType="count",
driveRefList=dict(driveRef=self.available_drives))
criteria = dict(raidLevel=self.raid_level,
phyDriveType=interface_type,
dssPreallocEnabled=False,
securityType="capable" if self.criteria_drive_require_fde else "none",
driveMediaType=drive_type,
onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
volumeCandidateRequestData=volume_candidate_request_data,
allocateReserveSpace=False,
securityLevel="fde" if self.criteria_drive_require_fde else "none",
candidateSelectionType=candidate_selection_type)
try:
rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
"Response=true" % self.ssid, data=criteria, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
% (self.ssid, to_native(error)))
if candidates:
candidates_list.extend(candidates["volumeCandidate"])
# Sort output based on tray and then drawer protection first
tray_drawer_protection = list()
tray_protection = list()
drawer_protection = list()
no_protection = list()
sorted_candidates = list()
for item in candidates_list:
if item["trayLossProtection"]:
if item["drawerLossProtection"]:
tray_drawer_protection.append(item)
else:
tray_protection.append(item)
elif item["drawerLossProtection"]:
drawer_protection.append(item)
else:
no_protection.append(item)
if tray_drawer_protection:
sorted_candidates.extend(tray_drawer_protection)
if tray_protection:
sorted_candidates.extend(tray_protection)
if drawer_protection:
sorted_candidates.extend(drawer_protection)
if no_protection:
sorted_candidates.extend(no_protection)
return sorted_candidates
# Determine the appropriate candidate list
for candidate in get_candidate_drive_request():
# Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
if self.criteria_drive_count:
if self.criteria_drive_count != int(candidate["driveCount"]):
continue
if self.criteria_min_usable_capacity:
if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
self.criteria_min_usable_capacity > int(candidate["usableSize"])):
continue
if self.criteria_drive_min_size:
if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
continue
return candidate
self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
@memoize
def get_expansion_candidate_drives(self):
"""Retrieve required expansion drive list.
Note: To satisfy the expansion criteria, each item in the candidate list must be added to the specified group, since
there is a potential limitation on how many drives can be incorporated at a time.
* Traditional raid volume groups can be expanded by at most two drives at a time. There is no limit for raid disk pools.
:return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
"""
def get_expansion_candidate_drive_request():
"""Perform the request for expanding existing volume groups or disk pools.
Note: the list of candidate structures does not necessarily contain candidates that meet all criteria.
"""
candidates_list = None
url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid
if self.raid_level == "raidDiskPool":
url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid
try:
rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"])
except Exception as error:
self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
% (self.ssid, to_native(error)))
return candidates_list["candidates"]
required_candidate_list = list()
required_additional_drives = 0
required_additional_capacity = 0
total_required_capacity = 0
# determine whether and how much expansion is needed to satisfy the specified criteria
if self.criteria_min_usable_capacity:
total_required_capacity = self.criteria_min_usable_capacity
required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"])
if self.criteria_drive_count:
required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives)
# Determine the appropriate expansion candidate list
if required_additional_drives > 0 or required_additional_capacity > 0:
for candidate in get_expansion_candidate_drive_request():
if self.criteria_drive_min_size:
if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
continue
if self.raid_level == "raidDiskPool":
if (len(candidate["drives"]) >= required_additional_drives and
self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity):
required_candidate_list.append(candidate)
break
else:
required_additional_drives -= len(candidate["drives"])
required_additional_capacity -= int(candidate["usableCapacity"])
required_candidate_list.append(candidate)
# Determine if required drives and capacities are satisfied
if required_additional_drives <= 0 and required_additional_capacity <= 0:
break
else:
self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
return required_candidate_list
def get_reserve_drive_count(self):
"""Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool)."""
if not self.pool_detail:
self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid)
if self.raid_level != "raidDiskPool":
self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
% (self.pool_detail["id"], self.ssid))
return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"]
def get_maximum_reserve_drive_count(self):
"""Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool)."""
if self.raid_level != "raidDiskPool":
self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
% (self.pool_detail["id"], self.ssid))
drives_ids = list()
if self.pool_detail:
drives_ids.extend(self.storage_pool_drives)
for candidate in self.get_expansion_candidate_drives():
drives_ids.extend((candidate["drives"]))
else:
candidate = self.get_candidate_drives()
drives_ids.extend(candidate["driveRefList"]["driveRef"])
drive_count = len(drives_ids)
maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10)
if maximum_reserve_drive_count > 10:
maximum_reserve_drive_count = 10
return maximum_reserve_drive_count
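The cap computed above reduces to a one-line function of the drive count; a minimal sketch (the function name is illustrative):

def max_reserve_drives(drive_count):
    # 20% of the drives plus one, capped at 10, and never so many that
    # fewer than 10 non-reserve drives would remain (mirrors the logic above).
    return min(int(drive_count * 0.2 + 1), drive_count - 10, 10)

assert max_reserve_drives(12) == 2
assert max_reserve_drives(64) == 10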
def set_reserve_drive_count(self, check_mode=False):
"""Set the reserve drive count for raidDiskPool."""
changed = False
if self.raid_level == "raidDiskPool" and self.reserve_drive_count:
maximum_count = self.get_maximum_reserve_drive_count()
if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count:
self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. "
"Note that it may be necessary to wait for expansion operations to complete "
"before the adjusting the reserve drive count. Maximum [%s]. Array [%s]."
% (maximum_count, self.ssid))
if self.reserve_drive_count != self.get_reserve_drive_count():
changed = True
if not check_mode:
try:
rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid,
method="POST", data=dict(volumeGroupRef=self.pool_detail["id"],
newDriveCount=self.reserve_drive_count))
except Exception as error:
self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]."
" Array [%s]." % (self.pool_detail["id"], self.ssid))
return changed
def erase_all_available_secured_drives(self, check_mode=False):
"""Erase all available drives that have encryption at rest feature enabled."""
changed = False
drives_list = list()
for drive in self.drives:
if drive["available"] and drive["fdeEnabled"]:
changed = True
drives_list.append(drive["id"])
if drives_list and not check_mode:
try:
rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
% self.ssid, method="POST", data=dict(driveRef=drives_list))
except Exception as error:
self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid)
return changed
def create_storage_pool(self):
"""Create new storage pool."""
url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid
request_body = dict(label=self.name,
candidate=self.get_candidate_drives())
if self.raid_level == "raidDiskPool":
url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid
request_body.update(
dict(backgroundOperationPriority="useDefault",
criticalReconstructPriority="useDefault",
degradedReconstructPriority="useDefault",
poolUtilizationCriticalThreshold=65535,
poolUtilizationWarningThreshold=0))
if self.reserve_drive_count:
request_body.update(dict(volumeCandidateData=dict(
diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count))))
try:
rc, resp = self.request(url, method="POST", data=request_body)
except Exception as error:
self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]."
% (self.ssid, to_native(error)))
# Update drive and storage pool information
self.pool_detail = self.storage_pool
def delete_storage_pool(self):
"""Delete storage pool."""
storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]]
try:
delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else ""
rc, resp = self.request("storage-systems/%s/storage-pools/%s%s"
% (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]."
% (self.pool_detail["id"], self.ssid, to_native(error)))
if storage_pool_drives and self.erase_secured_drives:
try:
rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
% self.ssid, method="POST", data=dict(driveRef=storage_pool_drives))
except Exception as error:
self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]."
" Error [%s]." % (self.ssid, to_native(error)))
def secure_storage_pool(self, check_mode=False):
"""Enable security on an existing storage pool"""
self.pool_detail = self.storage_pool
needs_secure_pool = False
if not self.secure_pool and self.pool_detail["securityType"] == "enabled":
self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.")
if self.secure_pool and self.pool_detail["securityType"] != "enabled":
needs_secure_pool = True
if needs_secure_pool and not check_mode:
try:
rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]),
data=dict(securePool=True), method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error"
" [%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
self.pool_detail = self.storage_pool
return needs_secure_pool
def migrate_raid_level(self, check_mode=False):
"""Request storage pool raid level migration."""
needs_migration = self.raid_level != self.pool_detail["raidLevel"]
if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool":
self.module.fail_json(msg="Raid level cannot be changed for disk pools")
if needs_migration and not check_mode:
sp_raid_migrate_req = dict(raidLevel=self.raid_level)
try:
rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration"
% (self.ssid, self.name), data=sp_raid_migrate_req, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]."
" Error[%s]." % (self.ssid, to_native(error)))
self.pool_detail = self.storage_pool
return needs_migration
def expand_storage_pool(self, check_mode=False):
"""Add drives to existing storage pool.
:return bool: whether drives were required to be added to satisfy the specified criteria."""
expansion_candidate_list = self.get_expansion_candidate_drives()
changed_required = bool(expansion_candidate_list)
estimated_completion_time = 0.0
# build expandable groupings of traditional raid candidate
required_expansion_candidate_list = list()
while expansion_candidate_list:
subset = list()
while expansion_candidate_list and len(subset) < self.expandable_drive_count:
subset.extend(expansion_candidate_list.pop()["drives"])
required_expansion_candidate_list.append(subset)
if required_expansion_candidate_list and not check_mode:
url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid
if self.raid_level == "raidDiskPool":
url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid
while required_expansion_candidate_list:
candidate_drives_list = required_expansion_candidate_list.pop()
request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"],
driveRef=candidate_drives_list)
try:
rc, resp = self.request(url, method="POST", data=request_body)
except Exception as error:
rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
% (self.ssid, self.pool_detail["id"]), ignore_errors=True)
if rc == 200 and actions_resp:
actions = [action["currentAction"] for action in actions_resp
if action["volumeRef"] in self.storage_pool_volumes]
self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions"
" in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]."
% (", ".join(actions), self.pool_detail["id"], self.ssid,
to_native(error)))
self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]."
" Error[%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
# Wait for expansion completion unless it is the last request in the candidate list
if required_expansion_candidate_list:
for dummy in range(self.EXPANSION_TIMEOUT_SEC):
rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
% (self.ssid, self.pool_detail["id"]), ignore_errors=True)
if rc == 200:
for action in actions_resp:
if (action["volumeRef"] in self.storage_pool_volumes and
action["currentAction"] == "remappingDce"):
sleep(1)
estimated_completion_time = action["estimatedTimeToCompletion"]
break
else:
estimated_completion_time = 0.0
break
return changed_required, estimated_completion_time
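The nested while-loops above pack expansion candidates into per-request batches bounded by the array's expansion limit; an equivalent standalone sketch (names are illustrative):

def batch_expansion_drives(candidates, max_drives_per_request):
    # candidates: list of dicts shaped like {"drives": [...]}, consumed in
    # reverse order exactly as the pop()-based loops above do.
    batches = []
    while candidates:
        subset = []
        while candidates and len(subset) < max_drives_per_request:
            subset.extend(candidates.pop()["drives"])
        batches.append(subset)
    return batches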
def apply(self):
"""Apply requested state to storage array."""
changed = False
if self.state == "present":
if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None:
self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be"
" specified.")
if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count):
self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.")
self.pool_detail = self.storage_pool
self.module.log(pformat(self.pool_detail))
if self.state == "present" and self.erase_secured_drives:
self.erase_all_available_secured_drives(check_mode=True)
# Determine whether changes need to be applied to the storage array
if self.pool_detail:
if self.state == "absent":
changed = True
elif self.state == "present":
if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives):
self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]."
% (self.ssid, self.pool_detail["id"]))
if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]:
self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type."
" Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"]))
if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da !=
self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]):
self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]."
" Pool [%s]." % (self.ssid, self.pool_detail["id"]))
# Evaluate current storage pool for required change.
needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True)
if needs_expansion:
changed = True
if self.migrate_raid_level(check_mode=True):
changed = True
if self.secure_storage_pool(check_mode=True):
changed = True
if self.set_reserve_drive_count(check_mode=True):
changed = True
elif self.state == "present":
changed = True
# Apply changes to storage array
msg = "No changes were required for the storage pool [%s]."
if changed and not self.module.check_mode:
if self.state == "present":
if self.erase_secured_drives:
self.erase_all_available_secured_drives()
if self.pool_detail:
change_list = list()
# Expansion needs to occur before raid level migration to account for any sizing needs.
expanded, estimated_completion_time = self.expand_storage_pool()
if expanded:
change_list.append("expanded")
if self.migrate_raid_level():
change_list.append("raid migration")
if self.secure_storage_pool():
change_list.append("secured")
if self.set_reserve_drive_count():
change_list.append("adjusted reserve drive count")
if change_list:
msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)
if expanded:
msg += "\nThe expansion operation will complete in an estimated %s minutes."\
% estimated_completion_time
else:
self.create_storage_pool()
msg = "Storage pool [%s] was created."
if self.secure_storage_pool():
msg = "Storage pool [%s] was created and secured."
if self.set_reserve_drive_count():
msg += " Adjusted reserve drive count."
elif self.pool_detail:
self.delete_storage_pool()
msg = "Storage pool [%s] removed."
self.pool_detail = self.storage_pool
self.module.log(pformat(self.pool_detail))
self.module.log(msg % self.name)
self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
def main():
storage_pool = NetAppESeriesStoragePool()
storage_pool.apply()
if __name__ == "__main__":
main()

View file

@ -1,280 +0,0 @@
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_syslog
short_description: NetApp E-Series manage syslog settings
description:
- Allow the syslog settings to be configured for an individual E-Series storage-system
author: Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Add or remove the syslog server configuration for E-Series storage array.
- Existing syslog server configuration will be removed or updated when its address matches I(address).
- A fully qualified hostname that resolves to an IPv4 address matching I(address) will not be
treated as a match.
choices:
- present
- absent
default: present
address:
description:
- The syslog server's IPv4 address or a fully qualified hostname.
- All existing syslog configurations will be removed when I(state=absent) and I(address=None).
port:
description:
- This is the port the syslog server is using.
default: 514
protocol:
description:
- This is the transmission protocol the syslog server is using to receive syslog messages.
choices:
- udp
- tcp
- tls
default: udp
components:
description:
- The E-Series logging components define the specific logs to transfer to the syslog server.
- At the time of writing, 'auditLog' is the only logging component but more may become available.
default: ["auditLog"]
test:
description:
- This forces a test syslog message to be sent to the stated syslog server.
- Only attempts transmission when I(state=present).
type: bool
default: no
log_path:
description:
- This argument specifies a local path for logging purposes.
required: no
notes:
- Check mode is supported.
- This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
SANtricity OS 11.40.2) and higher.
'''
EXAMPLES = """
- name: Add two syslog server configurations to NetApp E-Series storage array.
netapp_e_syslog:
state: present
address: "{{ item }}"
port: 514
protocol: tcp
component: "auditLog"
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
loop:
- "192.168.1.1"
- "192.168.1.100"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
syslog:
description:
- True if the syslog server configuration has been added to the E-Series storage array.
returned: on success
sample: True
type: bool
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Syslog(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(choices=["present", "absent"], required=False, default="present"),
address=dict(type="str", required=False),
port=dict(type="int", default=514, required=False),
protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
components=dict(type="list", required=False, default=["auditLog"]),
test=dict(type="bool", default=False, required=False),
log_path=dict(type="str", required=False),
))
required_if = [
["state", "present", ["address", "port", "protocol", "components"]],
]
mutually_exclusive = [
["test", "absent"],
]
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if,
mutually_exclusive=mutually_exclusive)
args = self.module.params
self.syslog = args["state"] in ["present"]
self.address = args["address"]
self.port = args["port"]
self.protocol = args["protocol"]
self.components = args["components"]
self.test = args["test"]
self.ssid = args["ssid"]
self.url = args["api_url"]
self.creds = dict(url_password=args["api_password"],
validate_certs=args["validate_certs"],
url_username=args["api_username"], )
self.components.sort()
self.check_mode = self.module.check_mode
# logging setup
log_path = args["log_path"]
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
def get_configuration(self):
"""Retrieve existing syslog configuration."""
try:
(rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
headers=HEADERS, **self.creds)
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def test_configuration(self, body):
"""Send test syslog message to the storage array.
Allows a fixed number of retries to occur before a failure is issued, giving the storage array time to create
the new syslog server record.
"""
try:
(rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}/test".format(self.ssid, body["id"]),
method='POST', headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="We failed to send test message! Array Id [{0}]. Error [{1}].".format(self.ssid, to_native(err)))
def update_configuration(self):
"""Post the syslog request to array."""
config_match = None
perfect_match = None
update = False
body = dict()
# search existing configuration for syslog server entry match
configs = self.get_configuration()
if self.address:
for config in configs:
if config["serverAddress"] == self.address:
config_match = config
if (config["port"] == self.port and config["protocol"] == self.protocol and
len(config["components"]) == len(self.components) and
all([component["type"] in self.components for component in config["components"]])):
perfect_match = config_match
break
# generate body for the http request
if self.syslog:
if not perfect_match:
update = True
if config_match:
body.update(dict(id=config_match["id"]))
components = [dict(type=component_type) for component_type in self.components]
body.update(dict(serverAddress=self.address, port=self.port,
protocol=self.protocol, components=components))
self._logger.info(body)
self.make_configuration_request(body)
# remove specific syslog server configuration
elif self.address:
update = True
body.update(dict(id=config_match["id"]))
self._logger.info(body)
self.make_configuration_request(body)
# if no address is specified, remove all syslog server configurations
elif configs:
update = True
for config in configs:
body.update(dict(id=config["id"]))
self._logger.info(body)
self.make_configuration_request(body)
return update
def make_configuration_request(self, body):
# make http request(s)
if not self.check_mode:
try:
if self.syslog:
if "id" in body:
(rc, result) = request(
self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
else:
(rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
body.update(result)
# send syslog test message
if self.test:
self.test_configuration(body)
elif "id" in body:
(rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
method='DELETE', headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update(self):
"""Update configuration and respond to ansible."""
update = self.update_configuration()
self.module.exit_json(msg="The syslog settings have been updated.", changed=update)
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Syslog()
settings()
if __name__ == "__main__":
main()
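For clarity, the comparison at the heart of update_configuration can be condensed as follows; this is a sketch assuming config dicts shaped like the REST payloads handled above:

def classify(config, address, port, protocol, components):
    # Same address only -> an existing entry to update; a full attribute
    # match (including the component set) -> nothing needs to change.
    if config["serverAddress"] != address:
        return "no match"
    if (config["port"] == port and config["protocol"] == protocol and
            sorted(c["type"] for c in config["components"]) == sorted(components)):
        return "perfect match"
    return "address match"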

View file

@ -1,845 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_volume
short_description: NetApp E-Series manage storage volumes (standard and thin)
description:
- Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
state:
description:
- Whether the specified volume should exist
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
storage_pool_name:
description:
- Required only when I(state=='present').
- Name of the storage pool wherein the volume should reside.
required: false
size_unit:
description:
- The unit used to interpret the size parameter
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
size:
description:
- Required only when I(state=='present').
- Size of the volume in I(size_unit).
- Size of the virtual volume in the case of a thin volume in I(size_unit).
- The maximum virtual volume size of a thin provisioned volume is 256tb; however, other OS-level restrictions may
exist.
required: true
segment_size_kb:
description:
- Segment size of the volume
- All values are in kibibytes.
- Some common choices include '8', '16', '32', '64', '128', '256', and '512' but options are system
dependent.
- Retrieve the definitive system list from M(netapp_e_facts) under segment_sizes.
- When the storage pool is a raidDiskPool then the segment size must be 128kb.
- Segment size migrations are not allowed in this module
default: '128'
thin_provision:
description:
- Whether the volume should be thin provisioned.
- Thin volumes can only be created when I(raid_level=="raidDiskPool").
- Generally, use of thin-provisioning is not recommended due to performance impacts.
type: bool
default: false
thin_volume_repo_size:
description:
- This value (in size_unit) sets the allocated space for the thin provisioned repository.
- The initial value must be between 4gb and 256gb (inclusive), in increments of 4gb.
- During expansion operations the increase must be between 4gb and 256gb (inclusive), in increments of 4gb.
- This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic").
- Generally speaking, you should almost always use I(thin_volume_expansion_policy=="automatic").
required: false
thin_volume_max_repo_size:
description:
- This is the maximum amount the thin volume repository will be allowed to grow.
- Only has significance when I(thin_volume_expansion_policy=="automatic").
- When I(thin_volume_repo_size) as a percentage of I(thin_volume_max_repo_size) exceeds
I(thin_volume_growth_alert_threshold), a warning will be issued and the storage array will execute
the I(thin_volume_expansion_policy) policy.
- Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
repository size.
default: same as size (in size_unit)
thin_volume_expansion_policy:
description:
- This is the thin volume expansion policy.
- When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceed the
I(thin_volume_max_repo_size) will be automatically expanded.
- When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded the
storage system will wait for manual intervention.
- The thin volume_expansion policy can not be modified on existing thin volumes in this module.
- Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic).
choices: ["automatic", "manual"]
default: "automatic"
thin_volume_growth_alert_threshold:
description:
- This is the thin provision repository utilization threshold (in percent).
- When the percentage of used storage of the maximum repository size exceeds this value, an alert will
be issued and the I(thin_volume_expansion_policy) will be executed.
- Values must be between or equal to 10 and 99.
default: 95
owning_controller:
description:
- Specifies which controller will be the primary owner of the volume
- Not specifying will allow the controller to choose ownership.
required: false
choices: ["A", "B"]
ssd_cache_enabled:
description:
- Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
- The default value is to ignore existing SSD cache setting.
type: bool
default: false
data_assurance_enabled:
description:
- Determines whether data assurance (DA) should be enabled for the volume
- Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
type: bool
default: false
read_cache_enable:
description:
- Indicates whether read caching should be enabled for the volume.
type: bool
default: true
read_ahead_enable:
description:
- Indicates whether or not automatic cache read-ahead is enabled.
- This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
benefit from read ahead caching.
type: bool
default: true
write_cache_enable:
description:
- Indicates whether write-back caching should be enabled for the volume.
type: bool
default: true
cache_without_batteries:
description:
- Indicates whether caching should be used without battery backup.
- Warning: if I(cache_without_batteries==true) and the storage system loses power with no battery backup, data will be lost!
type: bool
default: false
workload_name:
description:
- Label for the workload defined by the metadata.
- When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
array.
- When I(workload_name) exists on the storage array but the metadata is different then the workload
definition will be updated. (Changes will update all associated volumes!)
- Existing workloads can be retrieved using M(netapp_e_facts).
required: false
metadata:
description:
- Dictionary containing metadata for the use, user, location, etc. of the volume (the dictionary is arbitrarily
defined for whatever the user deems useful).
- When I(workload_name) exists on the storage array but the metadata is different then the workload
definition will be updated. (Changes will update all associated volumes!)
- I(workload_name) must be specified when I(metadata) is defined.
type: dict
required: false
wait_for_initialization:
description:
- Forces the module to wait for expansion operations to complete before continuing.
type: bool
default: false
initialization_timeout:
description:
- Duration in seconds before the wait_for_initialization operation will terminate.
- I(wait_for_initialization) must be C(true) for this option to have any effect on the module's operations.
type: int
required: false
'''
EXAMPLES = """
- name: Create simple volume with workload tags (volume meta data)
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume
storage_pool_name: storage_pool
size: 300
size_unit: gb
workload_name: volume_tag
metadata:
key1: value1
key2: value2
- name: Create a thin volume
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume1
storage_pool_name: storage_pool
size: 131072
size_unit: gb
thin_provision: true
thin_volume_repo_size: 32
thin_volume_max_repo_size: 1024
- name: Expand thin volume's virtual size
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume1
storage_pool_name: storage_pool
size: 262144
size_unit: gb
thin_provision: true
thin_volume_repo_size: 32
thin_volume_max_repo_size: 1024
- name: Expand thin volume's maximum repository size
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
name: volume1
storage_pool_name: storage_pool
size: 262144
size_unit: gb
thin_provision: true
thin_volume_repo_size: 32
thin_volume_max_repo_size: 2048
- name: Delete volume
netapp_e_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: absent
name: volume
"""
RETURN = """
msg:
description: State of volume
type: str
returned: always
sample: "Standard volume [workload_vol_1] has been created."
"""
from time import sleep
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
from ansible.module_utils._text import to_native
class NetAppESeriesVolume(NetAppESeriesModule):
VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300
def __init__(self):
ansible_options = dict(
state=dict(required=True, choices=["present", "absent"]),
name=dict(required=True, type="str"),
storage_pool_name=dict(type="str"),
size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
type="str"),
size=dict(type="float"),
segment_size_kb=dict(type="int", default=128),
owning_controller=dict(required=False, choices=['A', 'B']),
ssd_cache_enabled=dict(type="bool", default=False),
data_assurance_enabled=dict(type="bool", default=False),
thin_provision=dict(type="bool", default=False),
thin_volume_repo_size=dict(type="int"),
thin_volume_max_repo_size=dict(type="float"),
thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"]),
thin_volume_growth_alert_threshold=dict(type="int", default=95),
read_cache_enable=dict(type="bool", default=True),
read_ahead_enable=dict(type="bool", default=True),
write_cache_enable=dict(type="bool", default=True),
cache_without_batteries=dict(type="bool", default=False),
workload_name=dict(type="str", required=False),
metadata=dict(type="dict", required=False),
wait_for_initialization=dict(type="bool", default=False),
initialization_timeout=dict(type="int", required=False))
required_if = [
["state", "present", ["storage_pool_name", "size"]],
["thin_provision", "true", ["thin_volume_repo_size"]]
]
super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True,
required_if=required_if)
args = self.module.params
self.state = args["state"]
self.name = args["name"]
self.storage_pool_name = args["storage_pool_name"]
self.size_unit = args["size_unit"]
self.segment_size_kb = args["segment_size_kb"]
if args["size"]:
self.size_b = self.convert_to_aligned_bytes(args["size"])
self.owning_controller_id = None
if args["owning_controller"]:
self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002"
self.read_cache_enable = args["read_cache_enable"]
self.read_ahead_enable = args["read_ahead_enable"]
self.write_cache_enable = args["write_cache_enable"]
self.ssd_cache_enabled = args["ssd_cache_enabled"]
self.cache_without_batteries = args["cache_without_batteries"]
self.data_assurance_enabled = args["data_assurance_enabled"]
self.thin_provision = args["thin_provision"]
self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"]
self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"])
self.thin_volume_repo_size_b = None
self.thin_volume_max_repo_size_b = None
if args["thin_volume_repo_size"]:
self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"])
if args["thin_volume_max_repo_size"]:
self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])
self.workload_name = args["workload_name"]
self.metadata = args["metadata"]
self.wait_for_initialization = args["wait_for_initialization"]
self.initialization_timeout = args["initialization_timeout"]
# convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
# each of the workload attributes dictionary entries
metadata = []
if self.metadata:
if not self.workload_name:
self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified."
" Array [%s]." % self.ssid)
for key in self.metadata.keys():
metadata.append(dict(key=key, value=self.metadata[key]))
self.metadata = metadata
if self.thin_provision:
if not self.thin_volume_max_repo_size_b:
self.thin_volume_max_repo_size_b = self.size_b
if not self.thin_volume_expansion_policy:
self.thin_volume_expansion_policy = "automatic"
if self.size_b > 256 * 1024 ** 4:
self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb is size."
" Attempted size [%sg]" % (self.size_b * 1024 ** 3))
if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
" repository size. Array [%s]." % self.ssid)
if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between or equal to 10 and 99."
"thin_volume_growth_alert_threshold [%s]. Array [%s]."
% (self.thin_volume_growth_alert_threshold, self.ssid))
self.volume_detail = None
self.pool_detail = None
self.workload_id = None
def convert_to_aligned_bytes(self, size):
"""Convert size to the truncated byte size that aligns on the segment size."""
size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
segment_count = int(size_bytes / segment_size_bytes)
return segment_count * segment_size_bytes
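For example, with the default 128 KiB segment size the truncation above behaves like this (values are illustrative):

size_bytes = 1000000000                                # a 1 GB (decimal) request
segment_size_bytes = 128 * 1024                        # default 128 KiB segments
segment_count = int(size_bytes / segment_size_bytes)   # 7629 whole segments
print(segment_count * segment_size_bytes)              # 999948288, truncated to align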
def get_volume(self):
"""Retrieve volume details from storage array."""
volumes = list()
thin_volumes = list()
try:
rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
% (self.ssid, to_native(err)))
try:
rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
% (self.ssid, to_native(err)))
volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
return volume_detail[0] if volume_detail else dict()
def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
"""Waits until volume becomes available.
:raises AnsibleFailJson when retries are exhausted.
"""
if retries == 0:
self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
% (self.name, self.ssid))
if not self.get_volume():
sleep(5)
self.wait_for_volume_availability(retries=retries - 1)
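The availability wait above is a bounded recursion that probes every five seconds; an equivalent iterative form, as a generic sketch rather than part of the module:

import time

def wait_until(predicate, retries=60, interval=5):
    # Probe up to `retries` times, sleeping `interval` seconds between
    # probes; return True as soon as the predicate holds.
    for _ in range(int(retries)):
        if predicate():
            return True
        time.sleep(interval)
    return False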
def wait_for_volume_action(self, timeout=None):
"""Waits until volume action is complete is complete.
:param: int timeout: Wait duration measured in seconds. Waits indefinitely when None.
"""
action = "unknown"
percent_complete = None
while action != "complete":
sleep(5)
try:
rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid)
# Search long lived operations for volume
action = "complete"
for operation in operations["longLivedOpsProgress"]:
if operation["volAction"] is not None:
for key in operation.keys():
if (operation[key] is not None and "volumeRef" in operation[key] and
(operation[key]["volumeRef"] == self.volume_detail["id"] or
("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))):
action = operation["volAction"]
percent_complete = operation["init"]["percentComplete"]
except Exception as err:
self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]."
" Error[%s]." % (self.name, self.ssid, to_native(err)))
if timeout is not None:
if timeout <= 0:
self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining"
" [%s]. Array Id [%s]." % (action, percent_complete, self.ssid))
self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid))
if timeout:
timeout -= 5
self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete))
self.module.log("Expansion action is complete.")
def get_storage_pool(self):
"""Retrieve storage pool details from the storage array."""
storage_pools = list()
try:
rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]."
% (self.ssid, to_native(err)))
pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name]
return pool_detail[0] if pool_detail else dict()
def check_storage_pool_sufficiency(self):
"""Perform a series of checks as to the sufficiency of the storage pool for the volume."""
if not self.pool_detail:
self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
if not self.volume_detail:
if self.thin_provision and not self.pool_detail['diskPool']:
self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.')
if (self.data_assurance_enabled and not
(self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and
self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")):
self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible."
" Array [%s]." % self.ssid)
if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision:
self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
" Array [%s]." % self.ssid)
else:
# Check for expansion
if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and
not self.thin_provision):
self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
" Array [%s]." % self.ssid)
def update_workload_tags(self, check_mode=False):
"""Check the status of the workload tag and update storage array definitions if necessary.
When the workload attributes are not provided but an existing workload tag name is, the existing
attributes will be reused.
:return bool: Whether changes were required to be made."""
change_required = False
workload_tags = None
request_body = None
ansible_profile_id = None
if self.workload_name:
try:
rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]" % self.ssid)
# Generate common indexed Ansible workload tag
current_tag_index_list = [int(pair["value"].replace("ansible_workload_", ""))
for tag in workload_tags for pair in tag["workloadAttributes"]
if pair["key"] == "profileId" and "ansible_workload_" in pair["value"] and
str(pair["value"]).replace("ansible_workload_", "").isdigit()]
tag_index = 1
if current_tag_index_list:
tag_index = max(current_tag_index_list) + 1
ansible_profile_id = "ansible_workload_%d" % tag_index
request_body = dict(name=self.workload_name,
profileId=ansible_profile_id,
workloadInstanceIndex=None,
isValid=True)
# evaluate and update storage array when needed
for tag in workload_tags:
if tag["name"] == self.workload_name:
self.workload_id = tag["id"]
if not self.metadata:
break
# Determine if the core attributes (everything but profileId) are the same
metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata)
tag_set = set(tuple(sorted(attr.items()))
for attr in tag["workloadAttributes"] if attr["key"] != "profileId")
if metadata_set != tag_set:
self.module.log("Workload tag change is required!")
change_required = True
# only perform the required action when check_mode==False
if change_required and not check_mode:
self.metadata.append(dict(key="profileId", value=ansible_profile_id))
request_body.update(dict(isNewWorkloadInstance=False,
isWorkloadDataInitialized=True,
isWorkloadCardDataToBeReset=True,
workloadAttributes=self.metadata))
try:
rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]),
data=request_body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
% (self.ssid, to_native(error)))
self.module.log("Workload tag [%s] required change." % self.workload_name)
break
# existing workload tag not found so create new workload tag
else:
change_required = True
self.module.log("Workload tag creation is required!")
if change_required and not check_mode:
if self.metadata:
self.metadata.append(dict(key="profileId", value=ansible_profile_id))
else:
self.metadata = [dict(key="profileId", value=ansible_profile_id)]
request_body.update(dict(isNewWorkloadInstance=True,
isWorkloadDataInitialized=False,
isWorkloadCardDataToBeReset=False,
workloadAttributes=self.metadata))
try:
rc, resp = self.request("storage-systems/%s/workloads" % self.ssid,
method="POST", data=request_body)
self.workload_id = resp["id"]
except Exception as error:
self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
% (self.ssid, to_native(error)))
self.module.log("Workload tag [%s] was added." % self.workload_name)
return change_required
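# Illustrative create-branch request body (hypothetical workload name):
# {"name": "db_workload", "profileId": "ansible_workload_1", "workloadInstanceIndex": None,
#  "isValid": True, "isNewWorkloadInstance": True, "isWorkloadDataInitialized": False,
#  "isWorkloadCardDataToBeReset": False,
#  "workloadAttributes": [{"key": "profileId", "value": "ansible_workload_1"}]}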
def get_volume_property_changes(self):
"""Retrieve the volume update request body when change(s) are required.
:raise AnsibleFailJson when attempting to change segment size on existing volume.
:return dict: request body when change(s) to a volume's properties are required.
"""
change = False
request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[],
cacheSettings=dict(readCacheEnable=self.read_cache_enable,
writeCacheEnable=self.write_cache_enable))
# check for invalid modifications
if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]):
self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified."
% self.volume_detail["segmentSize"])
# common thick/thin volume properties
if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or
self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or
self.ssd_cache_enabled != self.volume_detail["flashCached"]):
change = True
# controller ownership
if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]:
change = True
request_body.update(dict(owningControllerId=self.owning_controller_id))
if self.workload_name:
request_body.update(dict(metaTags=[dict(key="workloadId", value=self.workload_id),
dict(key="volumeTypeId", value="volume")]))
if {"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"]:
change = True
elif self.volume_detail["metadata"]:
change = True
# thick/thin volume specific properties
if self.thin_provision:
if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]):
change = True
request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold))
if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]:
change = True
request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy))
else:
if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0):
change = True
request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable))
if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]:
change = True
request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries))
return request_body if change else dict()
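# Illustrative thick-volume update body (hypothetical settings): {"flashCache": True,
# "metaTags": [], "cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
# "readAheadEnable": True, "cacheWithoutBatteries": False}}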
def get_expand_volume_changes(self):
"""Expand the storage specifications for the existing thick/thin volume.
:raise AnsibleFailJson when a thick/thin volume expansion request fails.
:return dict: dictionary containing all the necessary values for volume expansion request
"""
request_body = dict()
if self.size_b < int(self.volume_detail["capacity"]):
self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. Array [%s]"
% (self.name, self.ssid))
if self.volume_detail["thinProvisioned"]:
if self.size_b > int(self.volume_detail["capacity"]):
request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b))
self.module.log("Thin volume virtual size have been expanded.")
if self.volume_detail["expansionPolicy"] == "automatic":
if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]):
request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b))
self.module.log("Thin volume maximum repository size have been expanded (automatic policy).")
elif self.volume_detail["expansionPolicy"] == "manual":
if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]):
change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"])
if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0:
self.module.fail_json(msg="The thin volume repository increase must be between or equal to 4gb"
" and 256gb in increments of 4gb. Attempted size [%sg]."
% (self.thin_volume_repo_size_b * 1024 ** 3))
request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b))
self.module.log("Thin volume maximum repository size have been expanded (manual policy).")
elif self.size_b > int(self.volume_detail["capacity"]):
request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b))
self.module.log("Volume storage capacities have been expanded.")
return request_body
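# Illustrative request bodies (hypothetical sizes): expanding a thick volume to
# 100gb yields {"sizeUnit": "bytes", "expansionSize": 107374182400}; a manual-policy
# thin volume asking for a repository 8gb above the current one yields
# {"sizeUnit": "bytes", "newRepositorySize": <requested repository size in bytes>}.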
def create_volume(self):
"""Create thick/thin volume according to the specified criteria."""
body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes",
dataAssuranceEnabled=self.data_assurance_enabled)
if self.thin_provision:
body.update(dict(virtualSize=self.size_b,
repositorySize=self.thin_volume_repo_size_b,
maximumRepositorySize=self.thin_volume_max_repo_size_b,
expansionPolicy=self.thin_volume_expansion_policy,
growthAlertThreshold=self.thin_volume_growth_alert_threshold))
try:
rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("New thin volume created [%s]." % self.name)
else:
body.update(dict(size=self.size_b, segSize=self.segment_size_kb))
try:
rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("New volume created [%s]." % self.name)
def update_volume_properties(self):
"""Update existing thin-volume or volume properties.
:raise AnsibleFailJson when either thick/thin volume update request fails.
:return bool: whether update was applied
"""
self.wait_for_volume_availability()
self.volume_detail = self.get_volume()
request_body = self.get_volume_property_changes()
if request_body:
if self.thin_provision:
try:
rc, resp = self.request("storage-systems/%s/thin-volumes/%s"
% (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]."
" Error[%s]." % (self.name, self.ssid, to_native(error)))
else:
try:
rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
data=request_body, method="POST")
except Exception as error:
self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]."
" Error[%s]." % (self.name, self.ssid, to_native(error)))
return True
return False
def expand_volume(self):
"""Expand the storage specifications for the existing thick/thin volume.
:raise AnsibleFailJson when a thick/thin volume expansion request fails.
"""
request_body = self.get_expand_volume_changes()
if request_body:
if self.volume_detail["thinProvisioned"]:
try:
rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand"
% (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
except Exception as err:
self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(err)))
self.module.log("Thin volume specifications have been expanded.")
else:
try:
rc, resp = self.request(
"storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']),
data=request_body, method="POST")
except Exception as err:
self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(err)))
self.module.log("Volume storage capacities have been expanded.")
def delete_volume(self):
"""Delete existing thin/thick volume."""
if self.thin_provision:
try:
rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]),
method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("Thin volume deleted [%s]." % self.name)
else:
try:
rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
method="DELETE")
except Exception as error:
self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]."
% (self.name, self.ssid, to_native(error)))
self.module.log("Volume deleted [%s]." % self.name)
def apply(self):
"""Determine and apply any changes necessary to satisfy the specified criteria.
:raise AnsibleExitJson when completes successfully"""
change = False
msg = None
self.volume_detail = self.get_volume()
self.pool_detail = self.get_storage_pool()
# Determine whether changes need to be applied to existing workload tags
if self.state == 'present' and self.update_workload_tags(check_mode=True):
change = True
# Determine if any changes need to be applied
if self.volume_detail:
if self.state == 'absent':
change = True
elif self.state == 'present':
if self.get_expand_volume_changes() or self.get_volume_property_changes():
change = True
elif self.state == 'present':
if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or
self.thin_volume_repo_size_b > 256 * 1024 ** 3 or
self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0):
self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in"
" increments of 4gb. Attempted size [%sg]."
% (self.thin_volume_repo_size_b * 1024 ** 3))
change = True
self.module.log("Update required: [%s]." % change)
# Apply any necessary changes
if change and not self.module.check_mode:
if self.state == 'present':
if self.update_workload_tags():
msg = "Workload tag change occurred."
if not self.volume_detail:
self.check_storage_pool_sufficiency()
self.create_volume()
self.update_volume_properties()
msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created."
else:
if self.update_volume_properties():
msg = "Volume [%s] properties were updated."
if self.get_expand_volume_changes():
self.expand_volume()
msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded."
if self.wait_for_initialization:
self.module.log("Waiting for volume operation to complete.")
self.wait_for_volume_action(timeout=self.initialization_timeout)
elif self.state == 'absent':
self.delete_volume()
msg = "Volume [%s] has been deleted."
else:
msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."
self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change)
def main():
volume = NetAppESeriesVolume()
volume.apply()
if __name__ == '__main__':
main()

View file

@ -1,400 +0,0 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_volume_copy
short_description: NetApp E-Series create volume copy pairs
description:
- Create and delete volume copy pairs on NetApp E-Series storage arrays.
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- community.general.netapp.eseries
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API, for example C(https://prod-1.wahoo.acme.com/devmgr/v2).
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
source_volume_id:
description:
- The id of the volume copy source.
- If used, must be paired with destination_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
destination_volume_id:
description:
- The id of the volume copy destination.
- If used, must be paired with source_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
volume_copy_pair_id:
description:
- The id of a given volume copy pair
- Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
- Can be used to delete or check the presence of volume copy pairs
- Must specify this or (destination_volume_id and source_volume_id)
state:
description:
- Whether the specified volume copy pair should exist or not.
required: True
choices: ['present', 'absent']
create_copy_pair_if_does_not_exist:
description:
- Defines if a copy pair will be created if it does not exist.
- If set to True, destination_volume_id and source_volume_id are required.
type: bool
default: True
start_stop_copy:
description:
- Starts a re-copy or stops a copy in progress
- "Note: If you stop the initial file copy before it is done, the copy pair will be destroyed"
- Requires volume_copy_pair_id
search_volume_id:
description:
- Searches for all valid potential target and source volumes that could be used in a copy_pair
- Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
'''
RESULTS = """
"""
EXAMPLES = """
---
msg:
description: Success message
returned: success
type: str
sample: Json facts for the volume copy that was created.
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: Created Volume Copy Pair with ID
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
(rc, resp) = request(url, method='GET', url_username=params['api_username'],
url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
volume_copy_pair_id = None
for potential_copy_pair in resp:
if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
# also require the destination to match (assumes the API's 'targetVolume' field)
if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
volume_copy_pair_id = potential_copy_pair['id']
return volume_copy_pair_id
def create_copy_pair(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
rData = {
"sourceId": params['source_volume_id'],
"targetId": params['destination_volume_id']
}
(rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def delete_copy_pair_by_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 204:
return False, (rc, resp)
else:
return True, (rc, resp)
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
# verify existence without modifying the pair
(rc, resp) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def start_stop_copy(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
return True, response_data[0]['percentComplete']
else:
return False, response_data
def check_copy_status(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
if response_data['percentComplete'] != -1:
return True, response_data['percentComplete']
else:
return False, response_data['percentComplete']
else:
return False, response_data
def find_valid_copy_pair_targets_and_sources(params):
get_status = 'storage-systems/%s/volumes' % params['ssid']
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
source_capacity = None
candidates = []
for volume in response_data:
if volume['id'] == params['search_volume_id']:
source_capacity = volume['capacity']
else:
candidates.append(volume)
potential_sources = []
potential_targets = []
for volume in candidates:
if volume['capacity'] > source_capacity:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_targets.append(volume['id'])
else:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_sources.append(volume['id'])
return potential_targets, potential_sources
else:
raise Exception("Response [%s]" % response_code)
def main():
module = AnsibleModule(argument_spec=dict(
source_volume_id=dict(type='str'),
destination_volume_id=dict(type='str'),
copy_priority=dict(required=False, default=0, type='int'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True, type='bool'),
targetWriteProtected=dict(required=False, default=True, type='bool'),
onlineCopy=dict(required=False, default=False, type='bool'),
volume_copy_pair_id=dict(type='str'),
state=dict(required=True, choices=['present', 'absent'], type='str'),
create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
search_volume_id=dict(type='str'),
),
mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
['volume_copy_pair_id', 'source_volume_id'],
['volume_copy_pair_id', 'search_volume_id'],
['search_volume_id', 'destination_volume_id'],
['search_volume_id', 'source_volume_id'],
],
required_together=[['source_volume_id', 'destination_volume_id'],
],
required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
]
)
params = module.params
if not params['api_url'].endswith('/'):
params['api_url'] += '/'
# Check if we want to search
if params['search_volume_id'] is not None:
try:
potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
except Exception as e:
module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % to_native(e))
module.exit_json(changed=False,
msg='Valid source devices found: %s. Valid target devices found: %s.' % (len(potential_sources), len(potential_targets)),
search_volume_id=params['search_volume_id'],
valid_targets=potential_targets,
valid_sources=potential_sources)
# Check if we want to start or stop a copy operation
if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
# Get the current status info
currently_running, status_info = check_copy_status(params)
# If we want to start
if params['start_stop_copy'] == 'start':
# If we have already started
if currently_running is True:
module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
# If we need to start
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
else:
module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
# If we want to stop
else:
# If it has already stopped
if currently_running is False:
module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
# If we need to stop it
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
# If we want the copy pair to exist we do this stuff
if params['state'] == 'present':
# We need to check if it exists first
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# If no volume copy pair is found we need to make it.
if params['volume_copy_pair_id'] is None:
# We cannot create a copy pair from a volume_copy_pair_id alone; source and destination volume ids are required.
copy_began_status, (rc, resp) = create_copy_pair(params)
if copy_began_status is True:
module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
else:
module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
# If it does exist we do nothing
else:
# We verify that it exists
exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
params)
if exist_status:
module.exit_json(changed=False,
msg='Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
else:
if exist_status_code == 404:
module.fail_json(
msg='Volume Copy Pair with ID: %s does not exist. Cannot create without source_volume_id and destination_volume_id' %
params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
exist_status_code, exist_status_data))
module.fail_json(msg="Done")
# If we want it to not exist we do this
else:
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
# We delete it by the volume_copy_pair_id
delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
if delete_status is True:
module.exit_json(changed=True,
msg='Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
else:
if delete_status_code == 404:
module.exit_json(changed=False,
msg='Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
delete_status_code, delete_status_data))
if __name__ == '__main__':
main()

View file

@ -21,7 +21,7 @@ deprecated:
alternative: please use M(na_elementsw_account)
short_description: Manage SolidFire accounts
extends_documentation_fragment:
- community.general.netapp.solidfire
- community.general._netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
@ -104,7 +104,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()

View file

@ -21,7 +21,7 @@ deprecated:
alternative: please use M(na_elementsw_check_connections)
short_description: Check connectivity to MVIP and SVIP.
extends_documentation_fragment:
- community.general.netapp.solidfire
- community.general._netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
@ -63,7 +63,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()

View file

@ -20,7 +20,7 @@ deprecated:
alternative: please use M(na_elementsw_snapshot_schedule)
short_description: Manage SolidFire snapshot schedules
extends_documentation_fragment:
- community.general.netapp.solidfire
- community.general._netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
@ -138,7 +138,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()

View file

@ -21,7 +21,7 @@ deprecated:
alternative: please use M(na_elementsw_access_group)
short_description: Manage SolidFire Volume Access Groups
extends_documentation_fragment:
- community.general.netapp.solidfire
- community.general._netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
@ -102,7 +102,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()

View file

@ -21,7 +21,7 @@ deprecated:
alternative: please use M(na_elementsw_volume)
short_description: Manage SolidFire volumes
extends_documentation_fragment:
- community.general.netapp.solidfire
- community.general._netapp.solidfire
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
@ -131,7 +131,7 @@ msg:
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()

View file

@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 10.113.1.111:8443
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries

View file

@ -1 +0,0 @@
- include_tasks: run.yml

View file

@ -1,39 +0,0 @@
# Test code for the netapp_e_alerts module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
- name: NetApp Test Alerts module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined
or netapp_e_api_host is undefined or netapp_e_ssid is undefined
vars:
defaults: &defaults
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
state: enabled
server: mail.example.com
sender: noreply@example.com
recipients:
- noreply@example.com
- name: set default vars
set_fact:
vars: *defaults
- name: Set the initial alerting settings
netapp_e_alerts:
<<: *defaults
register: result
- name: Validate the idempotency of the module
netapp_e_alerts:
<<: *defaults
register: result
- name: Ensure we still have the same settings, but had no change
assert:
that: not result.changed

View file

@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 10.113.1.111:8443
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries

View file

@ -1 +0,0 @@
- include_tasks: run.yml

View file

@ -1,233 +0,0 @@
# Test code for the netapp_e_asup module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
- name: NetApp Test ASUP module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined
or netapp_e_api_host is undefined or netapp_e_ssid is undefined
vars: &vars
credentials: &creds
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
- name: set credentials
set_fact:
credentials: *creds
- name: Show some debug information
debug:
msg: "Using user={{ credentials.api_username }} on server={{ credentials.api_url }}."
# ****************************************************
# *** Enable auto-support using all default values ***
# ****************************************************
- name: Enable auto-support using default values
netapp_e_asup:
<<: *creds
verbose: true
- name: Collect auto-support state information from the array
uri:
url: "{{ credentials.api_url }}/device-asup"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- name: Validate auto-support expected default state
assert:
that: "{{ current.json.asupEnabled and
current.json.onDemandEnabled and
current.json.remoteDiagsEnabled and
current.json.schedule.dailyMinTime == 0 and
current.json.schedule.dailyMaxTime == 1439 }}"
msg: "Unexpected auto-support state"
- name: Validate auto-support schedule
assert:
that: "{{ item in current.json.schedule.daysOfWeek }}"
msg: "{{ item }} is missing from the schedule"
loop: "{{ lookup('list', ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']) }}"
# ****************************
# *** Disable auto-support ***
# ****************************
- name: Disable auto-support
netapp_e_asup:
<<: *creds
state: disabled
- name: Collect auto-support state information from the array
uri:
url: "{{ credentials.api_url }}/device-asup"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- name: Validate auto-support is disabled
assert:
that: "{{ not current.json.asupEnabled }}"
msg: "Auto-support failed to be disabled"
# ****************************************************
# *** Enable auto-support using specific values ***
# ****************************************************
- name: Enable auto-support using specific values
netapp_e_asup:
<<: *creds
state: enabled
active: true
start: 22
end: 24
days:
- friday
- saturday
verbose: true
- name: Collect auto-support state information from the array
uri:
url: "{{ credentials.api_url }}/device-asup"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- name: Validate auto-support expected state
assert:
that: "{{ current.json.asupEnabled and
current.json.onDemandEnabled and
current.json.remoteDiagsEnabled and
current.json.schedule.dailyMinTime == (22 * 60) and
current.json.schedule.dailyMaxTime == (24 * 60 - 1) }}"
msg: "Unexpected auto-support state"
- name: Validate auto-support schedule
assert:
that: "{{ item in current.json.schedule.daysOfWeek }}"
msg: "{{ item }} is missing from the schedule"
loop: "{{ lookup('list', ['friday', 'saturday']) }}"
# ***********************************
# *** Alter auto-support schedule ***
# ***********************************
- name: Alter auto-support schedule
netapp_e_asup:
<<: *creds
state: enabled
active: true
start: 0
end: 5
days:
- monday
- thursday
- sunday
verbose: true
- name: Collect auto-support state information from the array
uri:
url: "{{ credentials.api_url }}/device-asup"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- name: Validate auto-support expected state
assert:
that: "{{ current.json.asupEnabled and
current.json.onDemandEnabled and
current.json.remoteDiagsEnabled and
current.json.schedule.dailyMinTime == (0 * 60) and
current.json.schedule.dailyMaxTime == (5 * 60) }}"
msg: "Unexpected auto-support state"
- name: Validate auto-support schedule
assert:
that: "{{ item in current.json.schedule.daysOfWeek }}"
msg: "{{ item }} is missing from the schedule"
loop: "{{ lookup('list', ['monday', 'thursday', 'sunday']) }}"
# *************************************************************
# *** Repeat previous test to verify state remains the same ***
# *************************************************************
- name: Repeat auto-support schedule change to verify idempotency
netapp_e_asup:
<<: *creds
state: enabled
active: true
start: 0
end: 5
days:
- monday
- thursday
- sunday
verbose: true
register: result
- name: Collect auto-support state information from the array
uri:
url: "{{ credentials.api_url }}/device-asup"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- name: Validate auto-support expected state
assert:
that: "{{ current.json.asupEnabled and
current.json.onDemandEnabled and
current.json.remoteDiagsEnabled and
current.json.schedule.dailyMinTime == (0 * 60) and
current.json.schedule.dailyMaxTime == (5 * 60) }}"
msg: "Unexpected auto-support state"
- name: Validate auto-support schedule
assert:
that: "{{ item in current.json.schedule.daysOfWeek }}"
msg: "{{ item }} is missing from the schedule"
loop: "{{ lookup('list', ['monday', 'thursday', 'sunday']) }}"
- name: Validate change was not detected
assert:
that: "{{ not result.changed }}"
msg: "Invalid change was detected"
# ***********************************
# *** Disable auto-support active ***
# ***********************************
- name: Disable auto-support active state
netapp_e_asup:
<<: *creds
state: enabled
active: false
start: 0
end: 5
days:
- monday
- thursday
- sunday
verbose: true
- name: Collect auto-support state information from the array
uri:
url: "{{ credentials.api_url }}/device-asup"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- name: Validate auto-support expected state
assert:
that: "{{ current.json.asupEnabled and not current.json.onDemandEnabled and not current.json.remoteDiagsEnabled }}"
msg: "Unexpected auto-support state"

View file

@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 192.168.1.100
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries

View file

@ -1 +0,0 @@
- include_tasks: run.yml

View file

@ -1,209 +0,0 @@
# Test code for the netapp_e_drive_firmware module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Known symbol API issue: occasionally the symbol endpoint returns 422, which causes Ansible to fail; however, the drive firmware download will complete.
# Work-around: remove all storage provisioning before commencing the test.
- name: NetApp Test Drive Firmware module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined
or netapp_e_api_host is undefined or netapp_e_ssid is undefined
- set_fact:
firmware:
downgrade:
list:
- "/home/swartzn/Downloads/drive firmware/D_PX04SVQ160_DOWNGRADE_MS00toMSB6_801.dlp"
- "/home/swartzn/Downloads/drive firmware/D_ST1200MM0017_DNGRADE_MS02toMS00_6600_802.dlp"
check:
- firmware: "D_PX04SVQ160_DOWNGRADE_MS00toMSB6_801.dlp"
drive: "PX04SVQ160"
version: "MSB6"
- firmware: "D_ST1200MM0017_DNGRADE_MS02toMS00_6600_802.dlp"
drive: "ST1200MM0017"
version: "MS00"
upgrade:
list:
- "/home/swartzn/Downloads/drive firmware/D_PX04SVQ160_30603183_MS00_6600_001.dlp"
- "/home/swartzn/Downloads/drive firmware/D_ST1200MM0017_30602214_MS02_5600_002.dlp"
check:
- firmware: "D_PX04SVQ160_30603183_MS00_6600_001.dlp"
drive: "PX04SVQ160"
version: "MS00"
- firmware: "D_ST1200MM0017_30602214_MS02_5600_002.dlp"
drive: "ST1200MM0017"
version: "MS02"
- name: Set drive firmware (baseline, maybe change)
netapp_e_drive_firmware:
api_url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: false
firmware: "{{ firmware['downgrade']['list'] }}"
wait_for_completion: true
ignore_inaccessible_drives: true
upgrade_drives_online: false
register: drive_firmware
- pause:
seconds: 60
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_ssid }}/drives"
user: "{{ netapp_e_api_username }}"
password: "{{ netapp_e_api_password }}"
validate_certs: no
register: current_drive_firmware
- name: Check if drive firmware is the expected versions
assert:
that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
(firmware['downgrade']['check'][0]['drive'] == item['productID'].strip() and
firmware['downgrade']['check'][0]['version'] == item['softwareVersion']) or
(firmware['downgrade']['check'][1]['drive'] == item['productID'].strip() and
firmware['downgrade']['check'][1]['version'] == item['softwareVersion']) }}"
msg: "Drive firmware failed to update all drives"
loop: "{{ lookup('list', current_drive_firmware['json']) }}"
- name: Set drive firmware (upgrade, change-checkmode)
netapp_e_drive_firmware:
api_url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: false
firmware: "{{ firmware['upgrade']['list'] }}"
wait_for_completion: true
ignore_inaccessible_drives: true
upgrade_drives_online: false
register: drive_firmware
check_mode: true
- pause:
seconds: 60
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_ssid }}/drives"
user: "{{ netapp_e_api_username }}"
password: "{{ netapp_e_api_password }}"
validate_certs: no
register: current_drive_firmware
- name: Validate change status
assert:
that: "{{ drive_firmware.changed }}"
msg: "Change status is incorrect."
- name: Check if drive firmware is the expected versions
assert:
that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
(firmware['downgrade']['check'][0]['drive'] == item['productID'].strip() and
firmware['downgrade']['check'][0]['version'] == item['softwareVersion']) or
(firmware['downgrade']['check'][1]['drive'] == item['productID'].strip() and
firmware['downgrade']['check'][1]['version'] == item['softwareVersion']) }}"
msg: "Drive firmware failed to update all drives"
loop: "{{ lookup('list', current_drive_firmware['json']) }}"
- name: Set drive firmware (upgrade, change)
netapp_e_drive_firmware:
api_url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: false
firmware: "{{ firmware['upgrade']['list'] }}"
wait_for_completion: true
ignore_inaccessible_drives: true
upgrade_drives_online: false
register: drive_firmware
- pause:
seconds: 60
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_ssid }}/drives"
user: "{{ netapp_e_api_username }}"
password: "{{ netapp_e_api_password }}"
validate_certs: no
register: current_drive_firmware
- name: Validate change status
assert:
that: "{{ drive_firmware.changed }}"
msg: "Change status is incorrect."
- name: Check if drive firmware is the expected versions
assert:
that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
(firmware['upgrade']['check'][0]['drive'] == item['productID'].strip() and
firmware['upgrade']['check'][0]['version'] == item['softwareVersion']) or
(firmware['upgrade']['check'][1]['drive'] == item['productID'].strip() and
firmware['upgrade']['check'][1]['version'] == item['softwareVersion']) }}"
msg: "Drive firmware failed to update all drives"
loop: "{{ lookup('list', current_drive_firmware['json']) }}"
- name: Set drive firmware (upgrade, no change)
netapp_e_drive_firmware:
api_url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: false
firmware: "{{ firmware['upgrade']['list'] }}"
wait_for_completion: true
ignore_inaccessible_drives: true
upgrade_drives_online: false
register: drive_firmware
- pause:
seconds: 60
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_ssid }}/drives"
user: "{{ netapp_e_api_username }}"
password: "{{ netapp_e_api_password }}"
validate_certs: no
register: current_drive_firmware
- name: Validate change status
assert:
that: "{{ not drive_firmware.changed }}"
msg: "Change status is incorrect."
- name: Check if drive firmware is the expected versions
assert:
that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
(firmware['upgrade']['check'][0]['drive'] == item['productID'].strip() and
firmware['upgrade']['check'][0]['version'] == item['softwareVersion']) or
(firmware['upgrade']['check'][1]['drive'] == item['productID'].strip() and
firmware['upgrade']['check'][1]['version'] == item['softwareVersion']) }}"
msg: "Drive firmware failed to update all drives"
loop: "{{ lookup('list', current_drive_firmware['json']) }}"
- name: Set drive firmware (downgrade, change)
netapp_e_drive_firmware:
api_url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: false
firmware: "{{ firmware['downgrade']['list'] }}"
wait_for_completion: true
ignore_inaccessible_drives: true
upgrade_drives_online: false
register: drive_firmware
- pause:
seconds: 60
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_ssid }}/drives"
user: "{{ netapp_e_api_username }}"
password: "{{ netapp_e_api_password }}"
validate_certs: no
register: current_drive_firmware
- name: Validate change status
assert:
that: "{{ drive_firmware.changed }}"
msg: "Change status is incorrect."
- name: Check if drive firmware is the expected versions
assert:
that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
(firmware['downgrade']['check'][0]['drive'] == item['productID'].strip() and
firmware['downgrade']['check'][0]['version'] == item['softwareVersion']) or
(firmware['downgrade']['check'][1]['drive'] == item['productID'].strip() and
firmware['downgrade']['check'][1]['version'] == item['softwareVersion']) }}"
msg: "Drive firmware failed to update all drives"
loop: "{{ lookup('list', current_drive_firmware['json']) }}"

View file

@ -1,15 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_embedded_api_host: 192.168.1.1
#netapp_e_embedded_api_username: admin
#netapp_e_embedded_api_password: adminPass
#netapp_e_embedded_ssid: 1
#netapp_e_proxy_api_host: 192.168.1.100
#netapp_e_proxy_api_username: admin
#netapp_e_proxy_api_password: adminPass
#netapp_e_proxy_ssid: 10
unsupported
netapp/eseries

View file

@ -1 +0,0 @@
- include_tasks: run.yml

View file

@ -1,348 +0,0 @@
# Test code for the netapp_e_firmware module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# TODO: MUST BE DOWNGRADED TO RCB_11.40.3R2_280x_5c7d81b3.dlp and N280X-842834-D02.dlp BEFORE EXECUTING INTEGRATION TESTS
# loadControllerFirmware_MT swartzn@10.113.1.250 /home/swartzn/Downloads/RCB_11.40.3R2_280x_5c7d81b3.dlp /home/swartzn/Downloads/N280X-842834-D02.dlp
# This integration test will validate upgrade functionality for firmware-only, firmware-and-nvsram, and check mode.
- name: NetApp Test Firmware module
fail:
msg: "Please define netapp_e_embedded_api_host, netapp_e_embedded_api_username, netapp_e_embedded_api_password, netapp_e_embedded_ssid,
netapp_e_proxy_api_host, netapp_e_proxy_api_username, netapp_e_proxy_api_password, and netapp_e_proxy_ssid."
when: "netapp_e_embedded_api_host is undefined or netapp_e_embedded_api_username is undefined or netapp_e_embedded_api_password is undefined or
netapp_e_embedded_ssid is undefined or netapp_e_proxy_api_host is undefined or netapp_e_proxy_api_username is undefined or
netapp_e_proxy_api_password is undefined or netapp_e_proxy_ssid is undefined"
- set_fact:
path: "/home/swartzn/Downloads/"
upgrades:
- firmware: "RCB_11.40.3R2_280x_5c7d81b3.dlp"
nvsram: "N280X-842834-D02.dlp"
expected_firmware_version: "08.42.30.05"
expected_nvsram_version: "N280X-842834-D02"
- firmware: "RCB_11.40.5_280x_5ceef00e.dlp"
nvsram: "N280X-842834-D02.dlp"
expected_firmware_version: "08.42.50.00"
expected_nvsram_version: "N280X-842834-D02"
- firmware: "RCB_11.50.2_280x_5ce8501f.dlp"
nvsram: "N280X-852834-D02.dlp"
expected_firmware_version: "08.52.00.00"
expected_nvsram_version: "N280X-852834-D02"
- name: Perform firmware upgrade using the Web Services REST API (checkmode-no change, firmware only)
netapp_e_firmware:
ssid: "{{ netapp_e_embedded_ssid }}"
api_url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_embedded_api_username }}"
api_password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
check_mode: true
register: netapp_e_firmware
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == False }}"
msg: "Failed to return unchanged."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
msg: "Unexpected firmware version."
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
msg: "Unexpected nvsram version."
- name: Perform firmware upgrade using the Web Services REST API (no change, firmware only)
netapp_e_firmware:
ssid: "{{ netapp_e_embedded_ssid }}"
api_url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_embedded_api_username }}"
api_password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
register: netapp_e_firmware
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == False }}"
msg: "Failed to return changed."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
msg: "Unexpected firmware version."
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
msg: "Unexpected nvsram version."
- name: Perform firmware upgrade using the Web Services REST API (checkmode-change, firmware)
netapp_e_firmware:
ssid: "{{ netapp_e_embedded_ssid }}"
api_url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_embedded_api_username }}"
api_password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[1]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[1]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
register: netapp_e_firmware
check_mode: true
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == True }}"
msg: "Failed to return changed."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
msg: "Unexpected firmware version."
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
msg: "Unexpected nvsram version."
- name: Perform firmware upgrade using the Web Services REST API (change, firmware)
netapp_e_firmware:
ssid: "{{ netapp_e_embedded_ssid }}"
api_url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_embedded_api_username }}"
api_password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[1]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[1]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
register: netapp_e_firmware
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_embedded_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_embedded_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_embedded_api_username }}"
password: "{{ netapp_e_embedded_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == True }}"
msg: "Failed to return changed."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[1]['expected_firmware_version'] }}"
msg: "Unexpected firmware version. {{ current_firmware['json'][0] }} != {{ upgrades[1]['expected_firmware_version'] }}"
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[1]['expected_nvsram_version'] }}"
msg: "Unexpected nvsram version. {{ current_nvsram['json'][0] }} != {{ upgrades[1]['expected_nvsram_version'] }}"
- name: Perform firmware upgrade using the Web Services REST API (changed, firmware)
netapp_e_firmware:
ssid: "{{ netapp_e_proxy_ssid }}"
api_url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_proxy_api_username }}"
api_password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
register: netapp_e_firmware
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == True }}"
msg: "Failed to return changed."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
msg: "Failed to change the firmware version."
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
msg: "Failed to change the nvsram version."
- name: Perform firmware upgrade using the Web Services REST API (checkmode-unchanged, firmware)
netapp_e_firmware:
ssid: "{{ netapp_e_proxy_ssid }}"
api_url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_proxy_api_username }}"
api_password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
check_mode: true
register: netapp_e_firmware
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == False }}"
msg: "Failed to return unchanged."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
msg: "Failed to change the firmware version."
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
msg: "Failed to change the nvsram version."
- name: Perform firmware upgrade using the Web Services REST API (checkmode-change, firmware and nvsram)
netapp_e_firmware:
ssid: "{{ netapp_e_proxy_ssid }}"
api_url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_proxy_api_username }}"
api_password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[2]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[2]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
check_mode: true
register: netapp_e_firmware
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == True }}"
msg: "Failed to return changed."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
msg: "Failed to change the firmware version."
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
msg: "Failed to change the nvsram version."
- name: Perform firmware upgrade using the Web Services REST API (changed, firmware and nvsram)
netapp_e_firmware:
ssid: "{{ netapp_e_proxy_ssid }}"
api_url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_e_proxy_api_username }}"
api_password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
nvsram: "{{ path }}{{ upgrades[2]['nvsram'] }}"
firmware: "{{ path }}{{ upgrades[2]['firmware'] }}"
wait_for_completion: true
ignore_health_check: true
register: netapp_e_firmware
- name: Retrieve current firmware version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_firmware
- name: Retrieve current nvsram version
uri:
url: "https://{{ netapp_e_proxy_api_host }}:8443/devmgr/v2/storage-systems/{{ netapp_e_proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
user: "{{ netapp_e_proxy_api_username }}"
password: "{{ netapp_e_proxy_api_password }}"
validate_certs: no
register: current_nvsram
- name: Verify change status
assert:
that: "{{ netapp_e_firmware.changed == True }}"
msg: "Failed to return changed."
- name: Verify current firmware version
assert:
that: "{{ current_firmware['json'][0] == upgrades[2]['expected_firmware_version'] }}"
msg: "Failed to change the firmware version."
- name: Verify current nvsram version
assert:
that: "{{ current_nvsram['json'][0] == upgrades[2]['expected_nvsram_version'] }}"
msg: "Failed to change the nvsram version."


@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 10.113.1.111:8443
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries


@ -1 +0,0 @@
- include_tasks: run.yml


@ -1,51 +0,0 @@
# Test code for the netapp_e_global module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
- name: NetApp Test Global Settings module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined
or netapp_e_api_host is undefined or netapp_e_ssid is undefined
vars: &vars
credentials: &creds
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
name: TestArray
- name: set credentials
set_fact:
credentials: *creds
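# *creds dereferences the YAML anchor defined under vars above; the tasks below use
# the merge key syntax (<<: *creds) to splice the same credential arguments into each
# module invocation without repeating them.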
- name: Show some debug information
debug:
msg: "Using user={{ credentials.api_username }} on server={{ credentials.api_url }}."
- name: Set the name to the default
netapp_e_global:
<<: *creds
- name: Set a few different names
netapp_e_global:
<<: *creds
name: "{{ item }}"
loop:
- a
- x
- "000001111122222333334444455555"
- name: Set an explicit name
netapp_e_global:
<<: *creds
name: abc
register: result
- name: Validate name
assert:
that: result.name == "abc"
- name: Restore the original name
netapp_e_global:
<<: *creds


@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 192.168.1.1
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries


@ -1 +0,0 @@
- include_tasks: run.yml


@ -1,276 +0,0 @@
---
# Test code for the netapp_e_host module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
- name: NetApp Test Host module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined or
netapp_e_api_host is undefined or netapp_e_ssid is undefined
vars:
gather_facts: yes
credentials: &creds
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
hosts: &hosts
1:
host_type: 27
update_host_type: 28
ports:
- type: 'iscsi'
label: 'I1_1'
port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe-PORT1'
- type: 'iscsi'
label: 'I1_2'
port: 'iqn.1996-04.de.suse:01:56f86f9bd1ff-port1'
ports2:
- type: 'iscsi'
label: 'I1_1'
port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe-port2'
- type: 'iscsi'
label: 'I1_2'
port: 'iqn.1996-04.de.suse:01:56f86f9bd1ff-port2'
- type: 'iscsi'
label: 'I1_3'
port: 'iqn.1996-04.redhat:01:56f86f9bd1fe-PORT1'
2:
host_type: 27
update_host_type: 28
ports:
- type: 'iscsi'
label: 'I2_1'
port: 'iqn.1996-04.redhat:01:56f86f9bd1fe-port1'
- type: 'iscsi'
label: 'I2_2'
port: 'iqn.1996-04.redhat:01:56f86f9bd1ff-port1'
ports2:
- type: 'iscsi'
label: 'I2_1'
port: 'iqn.1996-04.redhat:01:56f86f9bd1fe-port2'
- type: 'iscsi'
label: 'I2_2'
port: 'iqn.1996-04.redhat:01:56f86f9bd1ff-PORT2'
# ********************************************
# *** Ensure jmespath package is installed ***
# ********************************************
# NOTE: jmespath must be installed for the json_query filter
- name: Ensure that jmespath is installed
pip:
name: jmespath
state: present
register: jmespath
- fail:
msg: "Restart playbook, the jmespath package was installed and is need for the playbook's execution."
when: jmespath.changed
# *****************************************
# *** Set credential and host variables ***
# *****************************************
- name: Set hosts variable
set_fact:
hosts: *hosts
- name: set credentials
set_fact:
credentials: *creds
- name: Show some debug information
debug:
msg: "Using user={{ credentials.api_username }} on server={{ credentials.api_url }}."
# *** Remove any existing hosts to set initial state and verify state ***
- name: Remove any existing hosts
netapp_e_host:
<<: *creds
state: absent
name: "{{ item.key }}"
with_dict: *hosts
# Retrieve array host definitions
- name: HTTP request for all host definitions from array
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/hosts"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
# Verify that host 1 and 2 host objects do not exist
- name: Collect host side port labels
set_fact:
host_labels: "{{ result | json_query('json[*].label') }}"
- name: Assert hosts were removed
assert:
that: "'{{ item.key }}' not in host_labels"
msg: "Host, {{ item.key }}, failed to be removed from the hosts!"
loop: "{{ lookup('dict', hosts) }}"
# *****************************************************************
# *** Create host definitions and validate host object creation ***
# *****************************************************************
- name: Define hosts
netapp_e_host:
<<: *creds
state: present
host_type: "{{ item.value.host_type }}"
ports: "{{ item.value.ports }}"
name: "{{ item.key }}"
with_dict: *hosts
# Retrieve array host definitions
- name: https request to validate host definitions were created
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/hosts"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
# Verify hosts were indeed created
- name: Collect host label list
set_fact:
hosts_labels: "{{ result | json_query('json[*].label') }}"
- name: Validate hosts were in fact created
assert:
that: "'{{ item.key }}' in hosts_labels"
msg: "host, {{ item.key }}, not define on array!"
loop: "{{ lookup('dict', hosts) }}"
# *** Update with no state changes results in no changes ***
- name: Redefine hosts, expecting no changes
netapp_e_host:
<<: *creds
state: present
host_type: "{{ item.value.host_type }}"
ports: "{{ item.value.ports }}"
name: "{{ item.key }}"
with_dict: *hosts
register: result
# Verify that no changes occurred
- name: Ensure no change occurred
assert:
msg: "A change was not detected!"
that: "not result.changed"
# ***********************************************************************************
# *** Redefine hosts using ports2 host definitions and validate the updated state ***
# ***********************************************************************************
- name: Redefine hosts, expecting changes
netapp_e_host:
<<: *creds
state: present
host_type: "{{ item.value.host_type }}"
ports: "{{ item.value.ports2 }}"
name: "{{ item.key }}"
force_port: yes
with_dict: *hosts
register: result
# Request from the array all host definitions
- name: HTTP request for port information
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/hosts"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
# Compile a list of array host port information for verifying changes
- name: Compile array host port information list
set_fact:
tmp: []
# Append each loop iteration's result to the previous extraction. Each iteration handles one host
# definition; the filters grab the host-side port list, pair it with a dictionary containing the
# host name (label), and lastly convert the zip_longest object into a list
- set_fact:
tmp: "{{ tmp }} + {{ [item | json_query('hostSidePorts[*]')] |
zip_longest([], fillvalue={'host_name': item.label}) | list }}"
loop: "{{ result.json }}"
# Make new list, port_info, by combining each list entry's dictionaries into a single dictionary
- name: Create port information list
set_fact:
port_info: []
- set_fact:
port_info: "{{ port_info + [item[0] |combine(item[1])] }}"
loop: "{{ tmp }}"
# Compile list of expected host port information for verifying changes
- name: Create expected port information list
set_fact:
tmp: []
# Append each loop iteration's result to the previous extraction. Each iteration handles one host
# definition; the filters grab the expected port list, pair it with a dictionary containing the
# host name (key), and lastly convert the zip_longest object into a list
- set_fact:
tmp: "{{ tmp }} + {{ [item | json_query('value.ports2[*]')]|
zip_longest([], fillvalue={'host_name': item.key|string}) | list }}"
loop: "{{ lookup('dict', hosts) }}"
# Make new list, expected_port_info, by combining each list entry's dictionaries into a single dictionary
- name: Create expected port information list
set_fact:
expected_port_info: []
- set_fact:
expected_port_info: "{{ expected_port_info + [ item[0] |combine(item[1]) ] }}"
loop: "{{ tmp }}"
# Verify that each host object has the expected protocol type and address/port
- name: Assert hosts information was updated with new port information
assert:
that: "{{ item[0].host_name != item[1].host_name or
item[0].label != item[1].label or
(item[0].type == item[1].type and
(item[0].address|regex_replace(':','')) == (item[1].port|regex_replace(':',''))) }}"
msg: "port failed to be updated!"
loop: "{{ query('nested', port_info, expected_port_info) }}"
# ****************************************************
# *** Remove any existing hosts and verify changes ***
# ****************************************************
- name: Remove any existing hosts
netapp_e_host:
<<: *creds
state: absent
name: "{{ item.key }}"
with_dict: *hosts
# Request all host object definitions
- name: HTTP request for all host definitions from array
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/hosts"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: results
# Collect port label information
- name: Collect host side port labels
set_fact:
host_side_port_labels: "{{ results | json_query('json[*].hostSidePorts[*].label') }}"
- name: Collect removed port labels
set_fact:
removed_host_side_port_labels: "{{ hosts | json_query('*.ports[*].label') }}"
# Verify host 1 and 2 objects were removed
- name: Assert hosts were removed
assert:
that: item not in host_side_port_labels
msg: "Host {{ item }} failed to be removed from the hosts!"
loop: "{{ removed_host_side_port_labels }}"


@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 10.113.1.111:8443
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries


@ -1 +0,0 @@
- include_tasks: run.yml


@ -1,448 +0,0 @@
---
# Test code for the netapp_e_iscsi_interface module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ***********************
# *** Local test data ***
# ***********************
- name: NetApp Test iSCSI Interface module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined
or netapp_e_api_host is undefined or netapp_e_ssid is undefined
vars:
credentials: &creds
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
array: &array
subnet: 255.255.255.0
gateway: 10.10.10.1
A:
- channel: 1
max_frame_size: 1500
- channel: 2
max_frame_size: 2000
- channel: 3
max_frame_size: 9000
- channel: 4
max_frame_size: 1500
- channel: 5
max_frame_size: 2000
- channel: 6
max_frame_size: 9000
B:
- channel: 1
max_frame_size: 9000
- channel: 2
max_frame_size: 1500
- channel: 3
max_frame_size: 2000
- channel: 4
max_frame_size: 9000
- channel: 5
max_frame_size: 1500
- channel: 6
max_frame_size: 2000
# ***************************************************
# *** Ensure python jmespath package is installed ***
# ***************************************************
- name: Ensure that jmespath is installed
pip:
name: jmespath
    state: present
register: jmespath
- fail:
msg: "Restart playbook, the jmespath package was installed and is need for the playbook's execution."
when: jmespath.changed
# ************************************
# *** Set local playbook test data ***
# ************************************
- name: set credentials
set_fact:
credentials: *creds
- name: set array
set_fact:
array: *array
- name: Show some debug information
debug:
msg: "Using user={{ credentials.api_username }} on server={{ credentials.api_url }}."
# *****************************************
# *** Disable all controller A channels ***
# *****************************************
- name: Disable all controller A ports
netapp_e_iscsi_interface:
<<: *creds
controller: "A"
channel: "{{ item.channel }}"
state: disabled
loop: "{{ lookup('list', array.A) }}"
# Delay to give the asynchronous symbol call time to complete
- pause:
seconds: 30
# Request all controller's iscsi host interface information
- name: Collect iscsi port information
uri:
url: "{{ xpath_filter_url }}?query=controller/hostInterfaces//iscsi"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
vars:
xpath_filter_url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/graph/xpath-filter"
# Extract controller A's port information from the iscsi host interfaces list
# Note: min filter is used because there are only two controller ids and the smaller corresponds with controller A
- name: Get controller A's controllerId
set_fact:
controller_a_id: "{{ result | json_query('json[*].controllerId') | min }}"
# Collect all port information associated with controller A
- name: Get controller A's port information
set_fact:
controller_a: "{{ result | json_query(controller_a_query) }}"
vars:
controller_a_query: "json[?controllerId=='{{ controller_a_id }}']"
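# The JMESPath expression json[?controllerId=='...'] filters the interface list down to
# only the entries whose controllerId matches controller A.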
# Confirm controller A's ports are disabled
- name: Verify all controller A ports are disabled
assert:
that: "{{ item.ipv4Enabled == false }}"
msg: "Controller A, channel {{ item.channel }} is not disabled"
loop: "{{ controller_a }}"
# *****************************************
# *** Disable all controller B channels ***
# *****************************************
- name: Disable all controller B ports
netapp_e_iscsi_interface:
<<: *creds
controller: "B"
channel: "{{ item.channel }}"
state: disabled
loop: "{{ lookup('list', array.B) }}"
# Delay to give the asynchronous symbol call time to complete
- pause:
seconds: 30
# Request all controller's iscsi host interface information
- name: Collect iscsi port information
uri:
url: "{{ xpath_filter_url }}?query=controller/hostInterfaces//iscsi"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
vars:
xpath_filter_url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/graph/xpath-filter"
# Extract controller B's port information from the iscsi host interfaces list
# Note: max filter is used because there are only two controller ids and the larger corresponds with controller B
- name: Get controller B's controllerId
set_fact:
controller_b_id: "{{ result | json_query('json[*].controllerId') | max }}"
# Collect all port information associated with controller B
- name: Get controller B's port information
set_fact:
controller_b: "{{ result | json_query(controller_b_query) }}"
vars:
controller_b_query: "json[?controllerId=='{{ controller_b_id }}']"
# Confirm controller B's ports are disabled
- name: Verify all controller B ports are disabled
assert:
that: "{{ item.ipv4Enabled == false }}"
msg: "Controller B, channel {{ item.channel }} is not disabled"
loop: "{{ controller_b }}"
# *****************************************************
# *** Configure all controller A's ports statically ***
# *****************************************************
- name: Configure controller A's port to use a static configuration method
netapp_e_iscsi_interface:
<<: *creds
controller: "A"
channel: "{{ item.channel }}"
state: enabled
config_method: static
address: "{{ array.gateway.split('.')[:3] | join('.') }}.{{ item.channel }}"
subnet_mask: "{{ array.subnet }}"
gateway: "{{ array.gateway }}"
max_frame_size: "{{ item.max_frame_size }}"
loop: "{{ lookup('list', array.A) }}"
# Delay to give the asynchronous symbol call time to complete
- pause:
seconds: 30
# Request a list of iscsi host interfaces
- name: Collect array information
uri:
url: "{{ xpath_filter }}?query=controller/hostInterfaces//iscsi"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
vars:
xpath_filter: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/graph/xpath-filter"
# Extract controller A's port information from the iscsi host interfaces list
# Note: min filter is used because there are only two controller ids and the smaller corresponds with controller A
- name: Get controller A's controllerId
set_fact:
controller_a_id: "{{ result | json_query('json[*].controllerId') | min }}"
# Compile any iscsi port information associated with controller A
- name: Get controller A's port information
set_fact:
controller_a: "{{ result | json_query(controller_a_query) }}"
vars:
controller_a_query: "json[?controllerId=='{{ controller_a_id }}']"
# Confirm that controller A ports are statically defined with the expected MTU, gateway, subnet and ipv4 address
- name: Verify expected controller A's port configuration
assert:
that: "{{ item[0].channel != item[1].channel or
( item[0].ipv4Data.ipv4AddressConfigMethod == 'configStatic' and
item[0].interfaceData.ethernetData.maximumFramePayloadSize == item[1].max_frame_size and
item[0].ipv4Data.ipv4AddressData.ipv4GatewayAddress == array.gateway and
item[0].ipv4Data.ipv4AddressData.ipv4SubnetMask == array.subnet and
item[0].ipv4Data.ipv4AddressData.ipv4Address == partial_address + item[1].channel | string ) }}"
msg: "Failed to configure controller A, channel {{ item[0].channel }}"
loop: "{{ query('nested', lookup('list', controller_a), lookup('list', array.A) ) }}"
vars:
partial_address: "{{ array.gateway.split('.')[:3] | join('.') + '.' }}"
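# query('nested', ...) produces every pairing of actual and expected port entries, so the
# assert's first clause (item[0].channel != item[1].channel) skips mismatched pairs.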
# *******************************************************************************************
# *** Configure controller B's channels for dhcp and specific frame maximum payload sizes ***
# *******************************************************************************************
- name: Configure controller B's ports to use dhcp with different MTU
netapp_e_iscsi_interface:
<<: *creds
controller: "B"
channel: "{{ item.channel }}"
state: enabled
config_method: dhcp
max_frame_size: "{{ item.max_frame_size }}"
loop: "{{ lookup('list', array.B) }}"
# Delay to give the asynchronous symbol call time to complete
- pause:
seconds: 30
# request a list of iscsi host interfaces
- name: Collect array information
uri:
url: "{{ xpath_filter_url }}?query=controller/hostInterfaces//iscsi"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
vars:
xpath_filter_url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/graph/xpath-filter"
# Extract controller B's port information from the iscsi host interfaces list
# Note: max filter is used because there are only two controller ids and the larger corresponds with controller B
- name: Get controller B's controllerId
set_fact:
controller_b_id: "{{ result | json_query('json[*].controllerId') | max }}"
- name: Get controller B port information list
set_fact:
controller_b: "{{ result | json_query(controller_b_query) }}"
vars:
controller_b_query: "json[?controllerId=='{{ controller_b_id }}']"
# Using a nested loop of array information and expected information, verify that each channel has the appropriate max
# frame payload size and is configured for dhcp
- name: Verify expected controller B's port configuration
assert:
that: "{{ item[0].channel != item[1].channel or
( item[0].ipv4Data.ipv4AddressConfigMethod == 'configDhcp' and
item[0].interfaceData.ethernetData.maximumFramePayloadSize == item[1].max_frame_size ) }}"
msg: >
Failed to configure controller channel {{ item[0].channel }} for dhcp
and/or maximum frame size of {{ item[1].max_frame_size }}!
loop: "{{ query('nested', lookup('list', controller_b), lookup('list', array.B)) }}"
# *******************************************************************************************
# *** Configure controller A's channels for dhcp and specific frame maximum payload sizes ***
# *******************************************************************************************
- name: Configure controller A's ports to use dhcp with different MTU
netapp_e_iscsi_interface:
<<: *creds
controller: "A"
channel: "{{ item.channel }}"
state: enabled
config_method: dhcp
max_frame_size: "{{ item.max_frame_size }}"
loop: "{{ lookup('list', array.A) }}"
# Delay to give the asynchronous symbol call time to complete
- pause:
seconds: 30
# Request a list of iscsi host interfaces
- name: Collect array information
uri:
url: "{{ xpath_filter_url }}?query=controller/hostInterfaces//iscsi"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
vars:
xpath_filter_url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/graph/xpath-filter"
# Extract controller A's port information from the iscsi host interfaces list
# Note: min filter is used because there are only two controller ids and the smaller corresponds with controller A
- name: Get controller A's controllerId
set_fact:
controller_a_id: "{{ result | json_query('json[*].controllerId') | min }}"
- name: Get controller A port information list
set_fact:
controller_a: "{{ result | json_query(controller_a_query) }}"
vars:
controller_a_query: "json[?controllerId=='{{ controller_a_id }}']"
# Using a nested loop of array information and expected information, verify that each channel has the appropriate max
# frame payload size and is configured for dhcp
- name: Verify expected controller A's port configuration
assert:
that: "{{ item[0].channel != item[1].channel or
( item[0].ipv4Data.ipv4AddressConfigMethod == 'configDhcp' and
item[0].interfaceData.ethernetData.maximumFramePayloadSize == item[1].max_frame_size ) }}"
msg: >
Failed to configure controller channel {{ item[0].channel }} for dhcp
and/or maximum frame size of {{ item[1].max_frame_size }}!
loop: "{{ query('nested', lookup('list', controller_a), lookup('list', array.A)) }}"
# *****************************************************
# *** Configure all controller B's ports statically ***
# *****************************************************
- name: Configure controller B's ports to use a static configuration method
netapp_e_iscsi_interface:
<<: *creds
controller: "B"
channel: "{{ item.channel }}"
state: enabled
config_method: static
address: "{{ array.gateway.split('.')[:3] | join('.') }}.{{ item.channel }}"
subnet_mask: "{{ array.subnet }}"
gateway: "{{ array.gateway }}"
max_frame_size: "{{ item.max_frame_size }}"
loop: "{{ lookup('list', array.B) }}"
# Delay to give the asynchronous symbol call time to complete
- pause:
seconds: 30
# request a list of iscsi host interfaces
- name: Collect array information
uri:
url: "{{ xpath_filter }}?query=controller/hostInterfaces//iscsi"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
vars:
xpath_filter: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/graph/xpath-filter"
# Extract controller B's port information from the iscsi host interfaces list
# Note: max filter is used because there are only two controller ids and the larger corresponds with controller B
- name: Get controller B's controllerId
set_fact:
controller_b_id: "{{ result | json_query('json[*].controllerId') | max }}"
# Compile any iscsi port information associated with controller B
- name: Get controller B's port information
set_fact:
controller_b: "{{ result | json_query(controller_b_query) }}"
vars:
controller_b_query: "json[?controllerId=='{{ controller_b_id }}']"
# Confirm that controller B ports are statically defined with the expected MTU, gateway, subnet and ipv4 address
- name: Verify expected controller B's port configuration
assert:
that: "{{ item[0].channel != item[1].channel or
( item[0].ipv4Data.ipv4AddressConfigMethod == 'configStatic' and
item[0].interfaceData.ethernetData.maximumFramePayloadSize == item[1].max_frame_size and
item[0].ipv4Data.ipv4AddressData.ipv4GatewayAddress == array.gateway and
item[0].ipv4Data.ipv4AddressData.ipv4SubnetMask == array.subnet and
item[0].ipv4Data.ipv4AddressData.ipv4Address == partial_address + item[1].channel | string ) }}"
msg: "Failed to configure controller B, channel {{ item[0].channel }}"
loop: "{{ query('nested', lookup('list', controller_b), lookup('list', array.B) ) }}"
vars:
partial_address: "{{ array.gateway.split('.')[:3] | join('.') + '.' }}"
# **************************************
# *** Disable all controller B ports ***
# **************************************
- name: Disable all controller B's ports
netapp_e_iscsi_interface:
<<: *creds
controller: "B"
channel: "{{ item.channel }}"
state: disabled
loop: "{{ lookup('list', array.B) }}"
# Delay to give the asynchronous symbol call time to complete
- pause:
seconds: 30
# Request controller iscsi host interface information
- name: Collect iscsi port information
uri:
url: "{{ xpath_filter_url }}?query=controller/hostInterfaces//iscsi"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: result
vars:
xpath_filter_url: "{{ credentials.api_url }}/storage-systems/{{ credentials.ssid }}/graph/xpath-filter"
# Extract controller B's port information from the iscsi host interfaces list
# Note: max filter is used because there are only two controller ids and the larger corresponds with controller B
- name: Get controller B's controllerId
set_fact:
controller_b_id: "{{ result | json_query('json[*].controllerId') | max }}"
# Compile any iscsi port information associated with controller B
- name: Get controller B's port information
set_fact:
controller_b: "{{ result | json_query(controller_b_query) }}"
vars:
controller_b_query: "json[?controllerId=='{{ controller_b_id }}']"
# Confirm that all of controller B's ports are disabled
- name: Verify all controller B ports are disabled
assert:
that: "{{ item.ipv4Enabled == false }}"
msg: "Controller B, channel {{ item.channel }} is not disabled"
loop: "{{ controller_b }}"


@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 10.113.1.111:8443
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries


@ -1 +0,0 @@
- include_tasks: run.yml


@ -1,68 +0,0 @@
# Test code for the netapp_e_iscsi_target module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
- name: NetApp Test iSCSI Target module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined
or netapp_e_api_host is undefined or netapp_e_ssid is undefined
vars: &vars
credentials: &creds
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
secrets: &secrets
# 12 characters
- 012345678912
# 16 characters
- 0123456789123456
- name: set credentials
set_fact:
credentials: *creds
- name: Show some debug information
debug:
msg: "Using user={{ credentials.api_username }} on server={{ credentials.api_url }}."
- name: Ensure we can set the chap secret
netapp_e_iscsi_target:
<<: *creds
name: myTarget
chap_secret: "{{ item }}"
loop: *secrets
- name: Turn off all of the options
netapp_e_iscsi_target:
<<: *creds
name: abc
ping: no
unnamed_discovery: no
- name: Ensure we can set the ping option
netapp_e_iscsi_target:
<<: *creds
name: myTarget
ping: yes
unnamed_discovery: yes
register: result
- name: Ensure we received a change
assert:
that: result.changed
- name: Run the ping change in check-mode
netapp_e_iscsi_target:
<<: *creds
name: myTarget
ping: yes
unnamed_discovery: yes
check_mode: yes
register: result
- name: Ensure no change resulted
assert:
that: not result.changed


@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 192.168.1.100
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries


@ -1 +0,0 @@
- include_tasks: run.yml


@ -1,326 +0,0 @@
# Test code for the netapp_e_lun_mapping module
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
- name: NetApp Test LUN Mapping module
fail:
msg: 'Please define netapp_e_api_username, netapp_e_api_password, netapp_e_api_host, and netapp_e_ssid.'
when: netapp_e_api_username is undefined or netapp_e_api_password is undefined
or netapp_e_api_host is undefined or netapp_e_ssid is undefined
vars:
credentials: &creds
api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
api_username: "{{ netapp_e_api_username }}"
api_password: "{{ netapp_e_api_password }}"
ssid: "{{ netapp_e_ssid }}"
validate_certs: no
- name: set credentials
set_fact:
credentials: *creds
# ****************************************************
# *** Setup test hosts, storage pools, and volumes ***
# ****************************************************
- name: Create host for host mapping
netapp_e_host:
<<: *creds
state: present
name: test_host_mapping_host
host_type: 27
- netapp_e_host:
<<: *creds
state: present
name: test_host1
host_type: 27
- netapp_e_host:
<<: *creds
state: present
name: test_host2
host_type: 27
- name: Create storage pool for host mapping
netapp_e_storagepool:
<<: *creds
state: present
name: test_host_mapping_storage_pool
raid_level: raid0
criteria_min_usable_capacity: 1
- name: Create volume for host mapping
netapp_e_volume:
<<: *creds
state: present
name: test_host_mapping_volume
storage_pool_name: test_host_mapping_storage_pool
size: 1
- name: Create volume for host mapping
netapp_e_volume:
<<: *creds
state: present
name: test_host_mapping_volume2
storage_pool_name: test_host_mapping_storage_pool
size: 1
# **********************************************
# *** Create new lun between host and volume ***
# **********************************************
- name: Create netapp_e_lun_mapping
netapp_e_lun_mapping:
<<: *creds
state: present
target: test_host_mapping_host
volume: test_host_mapping_volume
register: result
- name: Verify lun mapping
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ netapp_e_ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- assert:
that: "{{ item['mapped'] }}"
msg: "Lun failed to be created."
loop: "{{ lookup('list', current.json)}}"
# QUICK VERIFICATION OF MISMATCHING TARGET/TARGET_TYPE - GOOD
#- name: Create netapp_e_lun_mapping
# netapp_e_lun_mapping:
# <<: *creds
# state: present
# target: test_host_mapping_host
# volume: test_host_mapping_volume
# lun: 100
# target_type: group
# register: result
#
#- pause: seconds=30
# **************************************************************
# *** Repeat previous lun creation play and verify unchanged ***
# **************************************************************
- name: Repeat lun creation
netapp_e_lun_mapping:
<<: *creds
state: present
target: test_host_mapping_host
volume: test_host_mapping_volume
register: result
- name: Verify lun mapping
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ netapp_e_ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- assert:
that: "{{ item['mapped'] and result.changed==False }}"
msg: "Lun failed to be unchanged."
loop: "{{ lookup('list', current.json)}}"
# ****************************************************************
# *** Move existing lun to default target and verify unchanged ***
# ****************************************************************
- name: Move lun to default target
netapp_e_lun_mapping:
<<: *creds
state: present
volume: test_host_mapping_volume
register: result
- name: Verify lun mapping
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ netapp_e_ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- assert:
that: "{{ item['mapped'] }}"
msg: "Lun failed to be created."
loop: "{{ lookup('list', current.json)}}"
# *****************************************************************
# *** Move existing lun to specific target and verify unchanged ***
# *****************************************************************
- name: Move lun to specific target
netapp_e_lun_mapping:
<<: *creds
state: present
target: test_host_mapping_host
volume: test_host_mapping_volume
register: result
- name: Verify lun mapping
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ netapp_e_ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- assert:
that: "{{ item['mapped'] }}"
msg: "Lun failed to be created."
loop: "{{ lookup('list', current.json)}}"
# *******************************************
# *** Modify a volume mapping's lun value ***
# *******************************************
- name: Change volume mapping's lun value
netapp_e_lun_mapping:
<<: *creds
state: present
target: test_host_mapping_host
volume: test_host_mapping_volume
lun: 100
register: result
- pause: seconds=15
- name: Verify lun mapping
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ netapp_e_ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- assert:
that: "{{ result.changed }}"
msg: "Lun failed to be unchanged."
loop: "{{ lookup('list', current.json)}}"
- name: Verify mapping fails when lun already in use on existing host object
netapp_e_lun_mapping:
<<: *creds
state: present
target: test_host_mapping_host
volume: test_host_mapping_volume2
lun: 100
register: result
ignore_errors: True
- pause: seconds=15
- assert:
that: "{{ not result.changed }}"
msg: "Lun succeeded when it should have failed."
loop: "{{ lookup('list', current.json)}}"
- name: Verify mapping succeeds when the same lun is used on multiple host objects.
netapp_e_lun_mapping:
<<: *creds
state: present
target: test_host1
volume: test_host_mapping_volume2
lun: 100
register: result
- pause: seconds=15
- assert:
that: "{{ result.changed }}"
msg: "Lun failed to be unchanged."
loop: "{{ lookup('list', current.json)}}"
# *************************************************************************************************
# *** Verify that exact mapping details but different lun results in an unchanged configuration ***
# *************************************************************************************************
- name: Verify that exact mapping details but different lun results in an unchanged configuration
netapp_e_lun_mapping:
<<: *creds
state: absent
target: test_host_mapping_host
volume: test_host_mapping_volume
lun: 99
register: result
- name: Verify lun mapping
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ netapp_e_ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- assert:
that: "{{ item['mapped'] and not result.changed }}"
msg: "Lun failed to be unchanged."
loop: "{{ lookup('list', current.json)}}"
# ********************************
# *** Delete newly created lun ***
# ********************************
- name: Delete lun creation
netapp_e_lun_mapping:
<<: *creds
state: absent
target: test_host_mapping_host
volume: test_host_mapping_volume
register: result
- name: Verify lun mapping
uri:
url: "{{ credentials.api_url }}/storage-systems/{{ netapp_e_ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
user: "{{ credentials.api_username }}"
password: "{{ credentials.api_password }}"
body_format: json
validate_certs: no
register: current
- assert:
that: "{{ not item['mapped'] }}"
msg: "Lun failed to be created."
loop: "{{ lookup('list', current.json)}}"
# ********************************************************
# *** Tear down test hosts, storage pools, and volumes ***
# ********************************************************
- name: Delete volume for host mapping
netapp_e_volume:
<<: *creds
state: absent
name: test_host_mapping_volume
storage_pool_name: test_host_mapping_storage_pool
size: 1
- name: Delete volume for host mapping
netapp_e_volume:
<<: *creds
state: absent
name: test_host_mapping_volume2
storage_pool_name: test_host_mapping_storage_pool
size: 1
- name: Delete storage pool for host mapping
netapp_e_storagepool:
<<: *creds
state: absent
name: test_host_mapping_storage_pool
raid_level: raid0
criteria_min_usable_capacity: 1
- name: Delete host for host mapping
netapp_e_host:
<<: *creds
state: absent
name: test_host_mapping_host
host_type_index: 27
- name: Delete host for host mapping
netapp_e_host:
<<: *creds
state: absent
name: test_host2
host_type_index: 27
- name: Delete host for host mapping
netapp_e_host:
<<: *creds
state: absent
name: test_host1
host_type_index: 27


@ -1,10 +0,0 @@
# This test is not enabled by default, but can be utilized by defining required variables in integration_config.yml
# Example integration_config.yml:
# ---
#netapp_e_api_host: 10.113.1.111:8443
#netapp_e_api_username: admin
#netapp_e_api_password: myPass
#netapp_e_ssid: 1
unsupported
netapp/eseries


@ -1 +0,0 @@
- include_tasks: run.yml

Some files were not shown because too many files have changed in this diff.