Mirror of https://github.com/ansible-collections/community.general.git, synced 2024-09-14 20:13:21 +02:00
[wip] Remove network content (#84)
* rebase
* remove broken symlinks
* more deletes
* restore cs_* integration tests
* More deletes - from Felix
* cs_common
* Remove some more ignores
This commit is contained in:
parent 8d203225d3
commit c313c825f4
2215 changed files with 0 additions and 333978 deletions
@@ -1,79 +0,0 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.community.general.plugins.module_utils.network.aireos.aireos import aireos_provider_spec
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'aireos_config' else False

        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        provider = load_provider(aireos_provider_spec, self._task.args)

        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'aireos'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

        display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
        connection.set_options(direct={'persistent_command_timeout': command_timeout})

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        task_vars['ansible_socket'] = socket_path

        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        result = super(ActionModule, self).run(task_vars=task_vars)

        return result
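Each of the provider-based action plugins removed in this commit follows the same shape: when a task runs with connection=local, the plugin folds the legacy provider dictionary into a copy of the play context and opens a persistent network_cli connection. A minimal, standalone sketch of that precedence rule (the function and variable names here are illustrative only, not the netcommon API):

# Standalone illustration of the provider/play-context precedence used by the
# removed action plugins: an explicit provider value wins, then the play
# context, then a hard-coded default. Names and sample values are invented.
def resolve_connection_settings(provider, play_context):
    return {
        'host': provider.get('host') or play_context.get('remote_addr'),
        'port': int(provider.get('port') or play_context.get('port') or 22),
        'username': provider.get('username') or play_context.get('connection_user'),
        'password': provider.get('password') or play_context.get('password'),
    }

if __name__ == '__main__':
    provider = {'host': 'wlc01.example.net', 'port': None, 'username': 'admin', 'password': None}
    play_context = {'remote_addr': '10.0.0.1', 'port': 830, 'connection_user': 'ansible', 'password': 'secret'}
    # host and username come from the provider; port and password fall back to the play context
    print(resolve_connection_settings(provider, play_context))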
@@ -1,79 +0,0 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.community.general.plugins.module_utils.network.aruba.aruba import aruba_provider_spec
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'aruba_config' else False

        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        provider = load_provider(aruba_provider_spec, self._task.args)

        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'aruba'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

        display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
        connection.set_options(direct={'persistent_command_timeout': command_timeout})

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        task_vars['ansible_socket'] = socket_path

        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        result = super(ActionModule, self).run(task_vars=task_vars)

        return result
@@ -1,89 +0,0 @@
#
# Copyright: (c) 2016, Red Hat Inc.

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import ce_provider_spec
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible.utils.display import Display

display = Display()

CLI_SUPPORTED_MODULES = ['ce_rollback', 'ce_mlag_interface', 'ce_startup', 'ce_config',
                         'ce_command', 'ce_facts', 'ce_evpn_global', 'ce_evpn_bgp_rr',
                         'ce_mtu', 'ce_evpn_bgp', 'ce_snmp_location', 'ce_snmp_contact',
                         'ce_snmp_traps', 'ce_netstream_global', 'ce_netstream_aging',
                         'ce_netstream_export', 'ce_netstream_template', 'ce_ntp_auth',
                         'ce_stp', 'ce_vxlan_global', 'ce_vxlan_arp', 'ce_vxlan_gateway',
                         'ce_acl_interface']


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'ce_config' else False
        socket_path = None
        persistent_connection = self._play_context.connection.split('.')[-1]

        if self._play_context.connection == 'local':
            provider = load_provider(ce_provider_spec, self._task.args)
            transport = provider['transport'] or 'cli'

            display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)

            if transport == 'cli':
                pc = copy.deepcopy(self._play_context)
                pc.connection = 'network_cli'
                pc.network_os = 'ce'
                pc.remote_addr = provider['host'] or self._play_context.remote_addr
                pc.port = int(provider['port'] or self._play_context.port or 22)
                pc.remote_user = provider['username'] or self._play_context.connection_user
                pc.password = provider['password'] or self._play_context.password
                command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
                self._task.args['provider'] = provider.update(
                    host=pc.remote_addr,
                    port=pc.port,
                    username=pc.remote_user,
                    password=pc.password
                )
                if module_name in ['ce_netconf'] or module_name not in CLI_SUPPORTED_MODULES:
                    pc.connection = 'netconf'
                display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
                connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
                connection.set_options(direct={'persistent_command_timeout': command_timeout})

                socket_path = connection.run()
                display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
                if not socket_path:
                    return {'failed': True,
                            'msg': 'unable to open shell. Please see: ' +
                                   'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

                task_vars['ansible_socket'] = socket_path
            # make sure a transport value is set in args
            self._task.args['transport'] = transport
            self._task.args['provider'] = provider
        elif persistent_connection in ('netconf', 'network_cli'):
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
                del self._task.args['provider']

            if (persistent_connection == 'network_cli' and module_name not in CLI_SUPPORTED_MODULES) or \
                    (persistent_connection == 'netconf' and module_name in CLI_SUPPORTED_MODULES):
                return {'failed': True, 'msg': "Connection type '%s' is not valid for '%s' module."
                                               % (self._play_context.connection, self._task.action)}

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
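The ce action plugin above additionally chooses between the network_cli and netconf transports based on the module name. Distilled to a standalone sketch (the abbreviated allow-list below is illustrative, not the full list from the plugin):

# Distilled transport-selection rule from the removed ce action plugin:
# only an allow-listed set of modules may run over the CLI transport;
# everything else, including ce_netconf, is routed to netconf.
CLI_SUPPORTED_MODULES = {'ce_config', 'ce_command', 'ce_facts', 'ce_rollback'}  # abbreviated

def pick_connection(module_name):
    if module_name == 'ce_netconf' or module_name not in CLI_SUPPORTED_MODULES:
        return 'netconf'
    return 'network_cli'

print(pick_connection('ce_config'))   # network_cli
print(pick_connection('ce_netconf'))  # netconf
print(pick_connection('ce_vrf'))      # netconf (not in the CLI allow-list)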
@@ -1,104 +0,0 @@
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import time
import glob
from ansible.module_utils.six.moves.urllib.parse import urlsplit

from ansible.module_utils._text import to_text
from ansible_collections.community.general.plugins.action.ce import ActionModule as _ActionModule


class ActionModule(_ActionModule):

    def run(self, tmp=None, task_vars=None):

        try:
            self._handle_template()
        except (ValueError, AttributeError) as exc:
            return dict(failed=True, msg=exc.message)

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, __backup__ key may not be in results.
            self._write_backup(task_vars['inventory_hostname'], result['__backup__'])

        if '__backup__' in result:
            del result['__backup__']

        return result

    def _get_working_path(self):
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        open(filename, 'w').write(contents)

    def _handle_template(self):
        src = self._task.args.get('src')
        if not src:
            raise ValueError('missing required arguments: src')

        working_path = self._get_working_path()

        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            return

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            if hasattr(self._task, "_block:"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        with self._templar.set_temporary_context(searchpath=searchpath):
            self._task.args['src'] = self._templar.template(template_data)
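The _handle_template method above decides whether src can be used as-is or must be resolved relative to a templates directory. Roughly the same check, as a self-contained sketch with example paths (the working path and file names are invented):

import os
from urllib.parse import urlsplit

# Same decision the removed ce_template plugin makes: treat 'src' as-is when it
# is an absolute path or a URL, otherwise look it up under templates/.
def resolve_src(src, working_path='/tmp/playbook'):
    if os.path.isabs(src) or urlsplit(src).scheme:
        return src
    return os.path.join(working_path, 'templates', src)

print(resolve_src('/etc/backup.cfg'))            # absolute path, used as-is
print(resolve_src('https://example.com/a.cfg'))  # URL, used as-is
print(resolve_src('switch.cfg.j2'))              # relative, resolved under templates/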
@@ -1,69 +0,0 @@
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains Action Plugin methods for CNOS Config Module
# Lenovo Networking
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.community.general.plugins.module_utils.network.cnos.cnos import cnos_provider_spec
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'cnos_config' else False

        if self._play_context.connection == 'local':
            provider = load_provider(cnos_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'cnos'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = provider['port'] or self._play_context.port or 22
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
            pc.become = provider['authorize'] or True
            pc.become_pass = provider['auth_pass']
            pc.become_method = 'enable'

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
            connection.set_options(direct={'persistent_command_timeout': command_timeout})

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
@@ -1,36 +0,0 @@
#
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        self._config_module = True

        if self._play_context.connection.split('.')[-1] != 'network_cli':
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module. Must use fully qualified'
                                           ' name of network_cli connection type.' % self._play_context.connection}

        return super(ActionModule, self).run(task_vars=task_vars)
@@ -1,69 +0,0 @@
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains Action Plugin methods for ENOS Config Module
# Lenovo Networking
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.community.general.plugins.module_utils.network.enos.enos import enos_provider_spec
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'enos_config' else False

        if self._play_context.connection == 'local':
            provider = load_provider(enos_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'enos'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = provider['port'] or self._play_context.port or 22
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
            pc.become = provider['authorize'] or True
            pc.become_pass = provider['auth_pass']
            pc.become_method = 'enable'

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
            connection.set_options(direct={'persistent_command_timeout': command_timeout})

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
@@ -1,45 +0,0 @@
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule


class ActionModule(ActionNetworkModule):

    EXOS_NETWORK_CLI_MODULES = (
        'exos_facts',
        'exos_config',
        'exos_command')

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'exos_config' else False
        persistent_connection = self._play_context.connection.split('.')[-1]

        if persistent_connection not in ('network_cli', 'httpapi'):
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        if persistent_connection == 'network_cli' and module_name not in self.EXOS_NETWORK_CLI_MODULES:
            return {'failed': True, 'msg': "Connection type %s is not valid for this module" % self._play_context.connection}

        return super(ActionModule, self).run(task_vars=task_vars)
@@ -1,80 +0,0 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible_collections.community.general.plugins.module_utils.network.ironware.ironware import ironware_provider_spec
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'ironware_config' else False
        persistent_connection = self._play_context.connection.split('.')[-1]

        if persistent_connection == 'network_cli':
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']
        elif self._play_context.connection == 'local':
            provider = load_provider(ironware_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'ironware'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            pc.become = provider['authorize'] or False
            if pc.become:
                pc.become_method = 'enable'
            pc.become_pass = provider['auth_pass']

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)

            command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
            connection.set_options(direct={'persistent_command_timeout': command_timeout})

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
@@ -1,31 +0,0 @@
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        self._config_module = True
        return super(ActionModule, self).run(task_vars=task_vars)
@@ -1,31 +0,0 @@
#
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        self._config_module = True
        return super(ActionModule, self).run(task_vars=task_vars)
@@ -1,40 +0,0 @@
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import re

from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule

PRIVATE_KEYS_RE = re.compile('__.+__')


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'slxos_config' else False
        persistent_connection = self._play_context.connection.split('.')[-1]

        if persistent_connection not in ('network_cli'):
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
        return super(ActionModule, self).run(task_vars=task_vars)
@@ -1,77 +0,0 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
from ansible_collections.community.general.plugins.module_utils.network.sros.sros import sros_provider_spec
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        persistent_connection = self._play_context.connection.split('.')[-1]

        self._config_module = True if module_name == 'sros_config' else False
        if persistent_connection == 'network_cli':
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']
        elif self._play_context.connection == 'local':
            provider = load_provider(sros_provider_spec, self._task.args)

            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'sros'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
            connection.set_options(direct={'persistent_command_timeout': command_timeout})

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
@@ -1,36 +0,0 @@
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule


class ActionModule(ActionNetworkModule):

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'voss_config' else False
        persistent_connection = self._play_context.connection.split('.')[-1]

        if persistent_connection not in ('network_cli'):
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
        return super(ActionModule, self).run(task_vars=task_vars)
@@ -1,95 +0,0 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: aireos
short_description: Use aireos cliconf to run command on Cisco WLC platform
description:
  - This aireos plugin provides low level abstraction apis for
    sending and receiving CLI commands from Cisco WLC network devices.
'''

import re
import json

from itertools import chain

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'aireos'
        reply = self.get('show sysinfo')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Product Version\.* (.*)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'System Name\.* (.*)', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        reply = self.get('show inventory')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'DESCR: \"(.*)\"', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)
        return device_info

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running', 'startup'):
            return self.invalid_params("fetching configuration from %s is not supported" % source)
        if source == 'running':
            cmd = 'show run-config commands'
        else:
            cmd = 'show run-config startup-commands'
        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, command):
        for cmd in chain(['config'], to_list(command), ['end']):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            self._update_cli_prompt_context(config_context=')#')
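The cliconf plugins removed here parse show-command output with plain regular expressions. The same technique can be exercised standalone against a made-up excerpt; the sample text and values below are invented, only the regexes mirror the aireos cliconf code above:

import re

# Hypothetical excerpt of 'show sysinfo' output from a Cisco WLC.
SAMPLE = """Product Version.................................. 8.5.140.0
System Name...................................... lab-wlc-01
"""

version = re.search(r'Product Version\.* (.*)', SAMPLE)
hostname = re.search(r'System Name\.* (.*)', SAMPLE, re.M)
print(version.group(1) if version else None)    # 8.5.140.0
print(hostname.group(1) if hostname else None)  # lab-wlc-01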
@@ -1,72 +0,0 @@
# (C) 2018 Red Hat Inc.
# Copyright (C) 2019 APCON.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains CLIConf Plugin methods for apconos Modules
# APCON Networking

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
author: "David Li (@davidlee-ap)"
cliconf: apconos
short_description: Use apconos cliconf to run command on APCON network devices
description:
  - This apconos plugin provides low level abstraction apis for
    sending and receiving CLI commands from APCON network devices.
'''

import re
import json

from itertools import chain

from ansible.module_utils._text import to_bytes, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'apconos'
        reply = self.get(b'show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()
        if data:
            device_info['network_os_version'] = self.parse_version(data)
            device_info['network_os_model'] = self.parse_model(data)

        return device_info

    def parse_version(self, data):
        return ""

    def parse_model(self, data):
        return ""

    @enable_mode
    def get_config(self, source='running', format='text'):
        pass

    @enable_mode
    def edit_config(self, command):
        for cmd in chain([b'configure terminal'], to_list(command), [b'end']):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, check_all=check_all)

    def get_capabilities(self):
        return json.dumps(self.get_device_info())
@@ -1,95 +0,0 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: aruba
short_description: Use aruba cliconf to run command on Aruba platform
description:
  - This aruba plugin provides low level abstraction apis for
    sending and receiving CLI commands from Aruba network devices.
'''

import re
import json

from itertools import chain

from ansible.module_utils._text import to_bytes, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'aruba'
        reply = self.get('show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Version (\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'^MODEL: (\S+)\),', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        reply = self.get('show hostname')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'^Hostname is (.+)', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running', 'startup'):
            return self.invalid_params("fetching configuration from %s is not supported" % source)
        if source == 'running':
            cmd = 'show running-config all'
        else:
            cmd = 'show configuration'
        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, command):
        for cmd in chain(['configure terminal'], to_list(command), ['end']):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            self._update_cli_prompt_context(config_context=')#')
@ -1,121 +0,0 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: ce
short_description: Use ce cliconf to run command on HUAWEI CloudEngine platform
description:
  - This ce plugin provides low level abstraction apis for
    sending and receiving CLI commands from HUAWEI CloudEngine network devices.
'''

import re
import json

from itertools import chain

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'ce'
        reply = self.get('display version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'^Huawei.+\n.+\Version\s+(\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1).strip(',')

        match = re.search(r'^Huawei(.+)\n.+\(\S+\s+\S+\)', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        match = re.search(r'HUAWEI\s+(\S+)\s+uptime', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running'):
            return self.invalid_params("fetching configuration from %s is not supported" % source)

        if not flags:
            flags = []

        cmd = 'display current-configuration'

        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, command):
        results = []
        for cmd in chain(['configure terminal'], to_list(command), ['end']):
            if isinstance(cmd, dict):
                command = cmd['command']
                prompt = cmd['prompt']
                answer = cmd['answer']
                newline = cmd.get('newline', True)
            else:
                command = cmd
                prompt = None
                answer = None
                newline = True

            results.append(self.send_command(command, prompt, answer, False, newline))
        return results[1:-1]

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            out = self._connection.get_prompt()

            if out is None:
                raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received'
                                                       u' response window: %s' % self._connection._last_recv_window)

            prompt = to_text(out, errors='surrogate_then_replace').strip()
            while prompt.endswith(']'):
                self._connection.queue_message('vvvv', 'wrong context, sending return to device')
                if prompt.startswith('[*'):
                    self._connection.exec_command('clear configuration candidate')
                self._connection.exec_command('return')
                out = self._connection.get_prompt()
                prompt = to_text(out, errors='surrogate_then_replace').strip()
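The ce plugin above is never imported by playbooks directly; module code reaches it through the persistent connection proxy. A minimal module-side sketch, assuming a made-up module that runs a single display command (only the ansible.module_utils.connection.Connection interface is the standard mechanism; everything else here is illustrative):

import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection


def main():
    # Hypothetical module: runs one display command through the ce cliconf plugin.
    module = AnsibleModule(argument_spec=dict(command=dict(type='str', required=True)))
    conn = Connection(module._socket_path)  # RPC proxy to the cliconf plugin on the persistent connection
    caps = json.loads(conn.get_capabilities())  # get_capabilities() above returns a JSON string
    output = conn.get(command=module.params['command'])  # dispatched to Cliconf.get() above
    module.exit_json(changed=False, network_api=caps.get('network_api'), stdout=output)


if __name__ == '__main__':
    main()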
@ -1,135 +0,0 @@
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains CLIConf Plugin methods for CNOS Modules
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: cnos
short_description: Use cnos cliconf to run command on Lenovo CNOS platform
description:
  - This cnos plugin provides low level abstraction apis for
    sending and receiving CLI commands from Lenovo CNOS network devices.
'''

import re
import json

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils._text import to_bytes, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'cnos'
        reply = self.get('show sys-info')
        data = to_text(reply, errors='surrogate_or_strict').strip()
        host = self.get('show hostname')
        hostname = to_text(host, errors='surrogate_or_strict').strip()
        if data:
            device_info['network_os_version'] = self.parse_version(data)
            device_info['network_os_model'] = self.parse_model(data)
            device_info['network_os_hostname'] = hostname

        return device_info

    def parse_version(self, data):
        for line in data.split('\n'):
            line = line.strip()
            match = re.match(r'System Software Revision (.*?)',
                             line, re.M | re.I)
            if match:
                vers = line.split(':')
                ver = vers[1].strip()
                return ver
        return "NA"

    def parse_model(self, data):
        for line in data.split('\n'):
            line = line.strip()
            match = re.match(r'System Model (.*?)', line, re.M | re.I)
            if match:
                mdls = line.split(':')
                mdl = mdls[1].strip()
                return mdl
        return "NA"

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running', 'startup'):
            msg = "fetching configuration from %s is not supported"
            return self.invalid_params(msg % source)
        if source == 'running':
            cmd = 'show running-config'
        else:
            cmd = 'show startup-config'
        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, candidate=None, commit=True,
                    replace=None, comment=None):
        resp = {}
        results = []
        requests = []
        if commit:
            self.send_command('configure terminal')
            for line in to_list(candidate):
                if not isinstance(line, Mapping):
                    line = {'command': line}

                cmd = line['command']
                if cmd != 'end' and cmd[0] != '!':
                    results.append(self.send_command(**line))
                    requests.append(cmd)

            self.send_command('end')
        else:
            raise ValueError('check mode is not supported')

        resp['request'] = requests
        resp['response'] = results
        return resp

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            out = self._connection.get_prompt()

            if out is None:
                raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received'
                                                       u' response window: %s' % self._connection._last_recv_window)

            if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
                self._connection.queue_message('vvvv', 'In Config mode, sending exit to device')
                self._connection.send_command('exit')
            else:
                self._connection.send_command('enable')
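The filter inside edit_config() above is easy to miss: a trailing 'end' and '!'-prefixed comment lines are dropped from the candidate before anything is sent to the switch. A standalone sketch of that guard, run against an invented candidate list:

def is_config_command(cmd):
    # Same test as edit_config() above: skip an explicit 'end' and '!' comment lines.
    return cmd != 'end' and cmd[0] != '!'


candidate = ['hostname leaf01', '! pushed by automation', 'end']
print([c for c in candidate if is_config_command(c)])  # -> ['hostname leaf01']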
@ -1,114 +0,0 @@
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: edgeos
short_description: Use edgeos cliconf to run command on EdgeOS platform
description:
  - This edgeos plugin provides low level abstraction apis for
    sending and receiving CLI commands from Ubiquiti EdgeOS network devices.
'''

import re
import json

from itertools import chain

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'edgeos'
        reply = self.get('show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Version:\s*v?(\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'HW model:\s*(\S+)', data)
        if match:
            device_info['network_os_model'] = match.group(1)

        reply = self.get('show host name')
        device_info['network_os_hostname'] = to_text(reply, errors='surrogate_or_strict').strip()

        return device_info

    def get_config(self, source='running', format='text', flags=None):
        return self.send_command('show configuration commands')

    def edit_config(self, candidate=None, commit=True, replace=False, comment=None):
        for cmd in chain(['configure'], to_list(candidate)):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def commit(self, comment=None):
        if comment:
            command = 'commit comment "{0}"'.format(comment)
        else:
            command = 'commit'
        self.send_command(command)

    def discard_changes(self, *args, **kwargs):
        self.send_command('discard')

    def run_commands(self, commands=None, check_rc=True):
        if commands is None:
            raise ValueError("'commands' value is required")

        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            output = cmd.pop('output', None)
            if output:
                raise ValueError("'output' value %s is not supported for run_commands" % output)

            try:
                out = self.send_command(**cmd)
            except AnsibleConnectionFailure as e:
                if check_rc:
                    raise
                out = getattr(e, 'err', e)

            responses.append(out)

        return responses

    def get_device_operations(self):
        return {
            'supports_diff_replace': False,
            'supports_commit': True,
            'supports_rollback': False,
            'supports_defaults': False,
            'supports_onbox_diff': False,
            'supports_commit_comment': True,
            'supports_multiline_delimiter': False,
            'supports_diff_match': False,
            'supports_diff_ignore_lines': False,
            'supports_generate_diff': False,
            'supports_replace': False
        }

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        result['rpc'] += ['commit', 'discard_changes', 'run_commands']
        result['device_operations'] = self.get_device_operations()
        return json.dumps(result)
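EdgeOS is the only two-stage platform in this set: get_device_operations() above reports supports_commit and supports_commit_comment, and callers are expected to commit after edit_config(). A rough sketch of that flow under assumed names (apply_config and the cliconf handle are illustrative, not part of the original tree):

def apply_config(cliconf, commands, comment=None):
    # cliconf is an instance of the Cliconf class above, reached over an open connection.
    ops = cliconf.get_device_operations()
    cliconf.edit_config(candidate=commands)  # enters 'configure' and sends each command
    if ops['supports_commit']:
        # two-stage configuration: nothing takes effect until commit
        cliconf.commit(comment=comment if ops['supports_commit_comment'] else None)
    return cliconf.run_commands(['show configuration commands'])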
@ -1,141 +0,0 @@
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: edgeswitch
short_description: Use edgeswitch cliconf to run command on EdgeSwitch platform
description:
  - This edgeswitch plugin provides low level abstraction apis for
    sending and receiving CLI commands from Ubiquiti EdgeSwitch network devices.
'''

import re
import time
import json

from itertools import chain

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import dumps
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
from ansible.module_utils.common._collections_compat import Mapping


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'edgeswitch'
        reply = self.get(command='show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Software Version\.+ (.*)', data)
        if match:
            device_info['network_os_version'] = match.group(1).strip(',')

        match = re.search(r'^Machine Model\.+ (.*)', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        match = re.search(r'System Name\.+ (.*)', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    @enable_mode
    def get_config(self, source='running', flags=None):
        if source not in ('running', 'startup'):
            raise ValueError("fetching configuration from %s is not supported" % source)

        if source == 'running':
            cmd = 'show running-config '
        else:
            cmd = 'show startup-config '

        if flags:
            cmd += ' '.join(to_list(flags))
            cmd = cmd.strip()

        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, commands):
        resp = {}

        results = []
        requests = []
        self.send_command('configure')
        for line in to_list(commands):
            if not isinstance(line, Mapping):
                line = {'command': line}

            cmd = line['command']
            if cmd != 'end' and cmd[0] != '!':
                results.append(self.send_command(**line))
                requests.append(cmd)

        self.send_command('end')

        resp['request'] = requests
        resp['response'] = results
        return resp

    def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
        if not command:
            raise ValueError('must provide value of command to execute')
        if output:
            raise ValueError("'output' value %s is not supported for get" % output)

        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        result['rpc'] += ['run_commands']
        return json.dumps(result)

    def run_commands(self, commands=None, check_rc=True):
        if commands is None:
            raise ValueError("'commands' value is required")

        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            output = cmd.pop('output', None)
            if output:
                raise ValueError("'output' value %s is not supported for run_commands" % output)

            try:
                out = self.send_command(**cmd)
            except AnsibleConnectionFailure as e:
                if check_rc:
                    raise
                out = getattr(e, 'err', e)

            responses.append(out)

        return responses
@ -1,103 +0,0 @@
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains CLIConf Plugin methods for ENOS Modules
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: enos
short_description: Use enos cliconf to run command on Lenovo ENOS platform
description:
  - This enos plugin provides low level abstraction apis for
    sending and receiving CLI commands from Lenovo ENOS network devices.
'''

import re
import json

from itertools import chain

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'enos'
        reply = self.get('show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'^Software Version (.*?) ', data, re.M | re.I)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'^Lenovo RackSwitch (\S+)', data, re.M | re.I)
        if match:
            device_info['network_os_model'] = match.group(1)

        match = re.search(r'^(.+) uptime', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)
        else:
            device_info['network_os_hostname'] = "NA"

        return device_info

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running', 'startup'):
            msg = "fetching configuration from %s is not supported"
            return self.invalid_params(msg % source)
        if source == 'running':
            cmd = 'show running-config'
        else:
            cmd = 'show startup-config'
        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, command):
        for cmd in chain(['configure terminal'], to_list(command), ['end']):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            out = self._connection.get_prompt()

            if out is None:
                raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received'
                                                       u' response window: %s' % self._connection._last_recv_window)

            if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
                self._connection.queue_message('vvvv', 'In Config mode, sending exit to device')
                self._connection.send_command('exit')
            else:
                self._connection.send_command('enable')
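set_cli_prompt_context() above recovers from a session that was left in configuration mode, and the decision is made purely from the prompt suffix before any new show command is sent. A standalone illustration with fabricated prompts:

def left_in_config_mode(prompt_bytes):
    # Same check as above: a prompt ending in ')#' means a config sub-mode is still open.
    return prompt_bytes.decode(errors='replace').strip().endswith(')#')


print(left_in_config_mode(b'RackSwitch(config)#'))  # True  -> the plugin sends 'exit'
print(left_in_config_mode(b'RackSwitch>'))          # False -> the plugin sends 'enable'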
@ -1,97 +0,0 @@
#
# Copyright (c) 2019 Ericsson AB.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#


from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
author: Ericsson IPOS OAM team
cliconf: eccli
short_description: Use eccli cliconf to run command on Ericsson ECCLI platform
description:
  - This eccli plugin provides low level abstraction APIs for
    sending and receiving CLI commands from Ericsson ECCLI network devices.
'''

from ansible.module_utils.common._collections_compat import Mapping
import collections
import re
import time
import json

from itertools import chain

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_config(self, source='running', flags=None, format=None):
        return

    def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
        return

    def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
        if not command:
            raise ValueError('must provide value of command to execute')
        if output:
            raise ValueError("'output' value %s is not supported for get" % output)

        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_device_info(self):
        device_info = {}
        device_info['network_os'] = 'eric_eccli'
        return device_info

    def get_capabilities(self):
        result = dict()
        result['rpc'] = self.get_base_rpc() + ['run_commands']
        result['network_api'] = 'cliconf'
        result['device_info'] = self.get_device_info()
        return json.dumps(result)

    def run_commands(self, commands=None, check_rc=True):
        if commands is None:
            raise ValueError("'commands' value is required")

        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            output = cmd.pop('output', None)
            if output:
                raise ValueError("'output' value %s is not supported for run_commands" % output)
            try:
                out = self.send_command(**cmd)
            except AnsibleConnectionFailure as e:
                if check_rc:
                    raise
                out = getattr(e, 'err', e)

            responses.append(out)

        return responses
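check_rc is the only error-handling knob in run_commands() above: with the default True a failing command raises AnsibleConnectionFailure, with False the error text is returned in place of that command's output. A small wrapper sketch (the wrapper name and command strings are assumptions, not part of the plugin):

def run_quietly(cliconf, commands):
    # Collect output for every command; failures come back as error text instead of raising.
    responses = cliconf.run_commands(commands=commands, check_rc=False)
    names = [c if isinstance(c, str) else c['command'] for c in commands]
    return dict(zip(names, responses))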
@ -1,229 +0,0 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: exos
short_description: Use exos cliconf to run command on Extreme EXOS platform
description:
  - This exos plugin provides low level abstraction apis for
    sending and receiving CLI commands from Extreme EXOS network devices.
'''

import re
import json

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.common._collections_compat import Mapping
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
from ansible.plugins.cliconf import CliconfBase


class Cliconf(CliconfBase):

    def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
        diff = {}
        device_operations = self.get_device_operations()
        option_values = self.get_option_values()

        if candidate is None and device_operations['supports_generate_diff']:
            raise ValueError("candidate configuration is required to generate diff")

        if diff_match not in option_values['diff_match']:
            raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))

        if diff_replace not in option_values['diff_replace']:
            raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))

        # prepare candidate configuration
        candidate_obj = NetworkConfig(indent=1)
        candidate_obj.load(candidate)

        if running and diff_match != 'none' and diff_replace != 'config':
            # running configuration
            running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines)
            configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)

        else:
            configdiffobjs = candidate_obj.items

        diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
        return diff

    def get_device_info(self):
        device_info = {}
        device_info['network_os'] = 'exos'

        reply = self.run_commands({'command': 'show switch detail', 'output': 'text'})
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'ExtremeXOS version (\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'System Type: +(\S+)', data)
        if match:
            device_info['network_os_model'] = match.group(1)

        match = re.search(r'SysName: +(\S+)', data)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_default_flag(self):
        # The flag to modify the command to collect configuration with defaults
        return 'detail'

    def get_config(self, source='running', format='text', flags=None):
        options_values = self.get_option_values()
        if format not in options_values['format']:
            raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(options_values['format'])))

        lookup = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'}
        if source not in lookup:
            raise ValueError("fetching configuration from %s is not supported" % source)

        cmd = {'command': lookup[source], 'output': 'text'}

        if source == 'startup':
            reply = self.run_commands({'command': 'show switch', 'format': 'text'})
            data = to_text(reply, errors='surrogate_or_strict').strip()
            match = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE)
            if match:
                cmd['command'] += match.group(1)
            else:
                # No Startup(/Selected) Config
                return {}

        cmd['command'] += ' '.join(to_list(flags))
        cmd['command'] = cmd['command'].strip()

        return self.run_commands(cmd)[0]

    def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
        resp = {}
        operations = self.get_device_operations()
        self.check_edit_config_capability(operations, candidate, commit, replace, comment)
        results = []
        requests = []

        if commit:
            for line in to_list(candidate):
                if not isinstance(line, Mapping):
                    line = {'command': line}
                results.append(self.send_command(**line))
                requests.append(line['command'])
        else:
            raise ValueError('check mode is not supported')

        resp['request'] = requests
        resp['response'] = results
        return resp

    def get(self, command, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
        if output:
            command = self._get_command_with_output(command, output)
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def run_commands(self, commands=None, check_rc=True):
        if commands is None:
            raise ValueError("'commands' value is required")

        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            output = cmd.pop('output', None)
            if output:
                cmd['command'] = self._get_command_with_output(cmd['command'], output)

            try:
                out = self.send_command(**cmd)
            except AnsibleConnectionFailure as e:
                if check_rc is True:
                    raise
                out = getattr(e, 'err', e)

            if out is not None:
                try:
                    out = to_text(out, errors='surrogate_or_strict').strip()
                except UnicodeError:
                    raise ConnectionError(message=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))

                if output and output == 'json':
                    try:
                        out = json.loads(out)
                    except ValueError:
                        raise ConnectionError('Response was not valid JSON, got {0}'.format(
                            to_text(out)
                        ))
                responses.append(out)

        return responses

    def get_device_operations(self):
        return {
            'supports_diff_replace': False,  # identify if config should be merged or replaced is supported
            'supports_commit': False,  # identify if commit is supported by device or not
            'supports_rollback': False,  # identify if rollback is supported or not
            'supports_defaults': True,  # identify if fetching running config with default is supported
            'supports_commit_comment': False,  # identify if adding comment to commit is supported of not
            'supports_onbox_diff': False,  # identify if on box diff capability is supported or not
            'supports_generate_diff': True,  # identify if diff capability is supported within plugin
            'supports_multiline_delimiter': False,  # identify if multiline delimiter is supported within config
            'supports_diff_match': True,  # identify if match is supported
            'supports_diff_ignore_lines': True,  # identify if ignore line in diff is supported
            'supports_config_replace': False,  # identify if running config replace with candidate config is supported
            'supports_admin': False,  # identify if admin configure mode is supported or not
            'supports_commit_label': False,  # identify if commit label is supported or not
            'supports_replace': False
        }

    def get_option_values(self):
        return {
            'format': ['text', 'json'],
            'diff_match': ['line', 'strict', 'exact', 'none'],
            'diff_replace': ['line', 'block'],
            'output': ['text', 'json']
        }

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        result['rpc'] += ['run_commmands', 'get_default_flag', 'get_diff']
        result['device_operations'] = self.get_device_operations()
        result['device_info'] = self.get_device_info()
        result.update(self.get_option_values())
        return json.dumps(result)

    def _get_command_with_output(self, command, output):
        if output not in self.get_option_values().get('output'):
            raise ValueError("'output' value is %s is invalid. Valid values are %s" % (output, ','.join(self.get_option_values().get('output'))))

        if output == 'json' and not command.startswith('run script cli2json.py'):
            cmd = 'run script cli2json.py %s' % command
        else:
            cmd = command
        return cmd
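EXOS has no per-command JSON switch, so _get_command_with_output() above wraps the CLI command in the on-switch cli2json.py script and run_commands() then json.loads() the reply. The rewriting on its own, as a standalone sketch:

def to_json_command(command):
    # Mirrors _get_command_with_output() above for output='json'.
    if command.startswith('run script cli2json.py'):
        return command
    return 'run script cli2json.py %s' % command


print(to_json_command('show port config'))  # -> run script cli2json.py show port config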
@ -1,314 +0,0 @@
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
author: Ruckus Wireless (@Commscope)
cliconf: icx
short_description: Use icx cliconf to run command on Ruckus ICX platform
description:
  - This icx plugin provides low level abstraction APIs for
    sending and receiving CLI commands from Ruckus ICX network devices.
'''


import re
import time
import json
import os

from itertools import chain
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
from ansible.module_utils.common._collections_compat import Mapping


class Cliconf(CliconfBase):

    @enable_mode
    def get_config(self, source='running', flags=None, format=None, compare=None):
        if source not in ('running', 'startup'):
            raise ValueError("fetching configuration from %s is not supported" % source)

        if format:
            raise ValueError("'format' value %s is not supported for get_config" % format)

        if not flags:
            flags = []

        if compare is False:
            return ''
        else:
            if source == 'running':
                cmd = 'show running-config '
            else:
                cmd = 'show configuration '

            cmd += ' '.join(to_list(flags))
            cmd = cmd.strip()

            return self.send_command(cmd)

    def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
        """
        Generate diff between candidate and running configuration. If the
        remote host supports onbox diff capabilities ie. supports_onbox_diff in that case
        candidate and running configurations are not required to be passed as argument.
        In case if onbox diff capability is not supported candidate argument is mandatory
        and running argument is optional.
        :param candidate: The configuration which is expected to be present on remote host.
        :param running: The base configuration which is used to generate diff.
        :param diff_match: Instructs how to match the candidate configuration with current device configuration
                           Valid values are 'line', 'strict', 'exact', 'none'.
                           'line' - commands are matched line by line
                           'strict' - command lines are matched with respect to position
                           'exact' - command lines must be an equal match
                           'none' - will not compare the candidate configuration with the running configuration
        :param diff_ignore_lines: Use this argument to specify one or more lines that should be
                                  ignored during the diff. This is used for lines in the configuration
                                  that are automatically updated by the system. This argument takes
                                  a list of regular expressions or exact line matches.
        :param path: The ordered set of parents that uniquely identify the section or hierarchy
                     the commands should be checked against. If the parents argument
                     is omitted, the commands are checked against the set of top
                     level or global commands.
        :param diff_replace: Instructs on the way to perform the configuration on the device.
                             If the replace argument is set to I(line) then the modified lines are
                             pushed to the device in configuration mode. If the replace argument is
                             set to I(block) then the entire command block is pushed to the device in
                             configuration mode if any line is not correct.
        :return: Configuration diff in json format.
                 {
                     'config_diff': '',
                     'banner_diff': {}
                 }

        """
        diff = {}
        device_operations = self.get_device_operations()
        option_values = self.get_option_values()

        if candidate is None and device_operations['supports_generate_diff']:
            raise ValueError("candidate configuration is required to generate diff")

        if diff_match not in option_values['diff_match']:
            raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))

        if diff_replace not in option_values['diff_replace']:
            raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))

        # prepare candidate configuration
        candidate_obj = NetworkConfig(indent=1)
        want_src, want_banners = self._extract_banners(candidate)
        candidate_obj.load(want_src)

        if running and diff_match != 'none':
            # running configuration
            have_src, have_banners = self._extract_banners(running)

            running_obj = NetworkConfig(indent=1, contents=have_src, ignore_lines=diff_ignore_lines)
            configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)

        else:
            configdiffobjs = candidate_obj.items
            have_banners = {}

        diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''

        banners = self._diff_banners(want_banners, have_banners)
        diff['banner_diff'] = banners if banners else {}
        return diff

    @enable_mode
    def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
        resp = {}
        operations = self.get_device_operations()
        self.check_edit_config_capability(operations, candidate, commit, replace, comment)

        results = []
        requests = []
        if commit:
            prompt = self._connection.get_prompt()
            if (b'(config-if' in prompt) or (b'(config' in prompt) or (b'(config-lag-if' in prompt):
                self.send_command('end')

            self.send_command('configure terminal')

            for line in to_list(candidate):
                if not isinstance(line, Mapping):
                    line = {'command': line}

                cmd = line['command']
                if cmd != 'end' and cmd[0] != '!':
                    results.append(self.send_command(**line))
                    requests.append(cmd)

            self.send_command('end')
        else:
            raise ValueError('check mode is not supported')

        resp['request'] = requests
        resp['response'] = results
        return resp

    def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, check_all=False):
        if not command:
            raise ValueError('must provide value of command to execute')
        if output:
            raise ValueError("'output' value %s is not supported for get" % output)

        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, check_all=check_all)

    def scp(self, command=None, scp_user=None, scp_pass=None):
        if not command:
            raise ValueError('must provide value of command to execute')
        prompt = ["User name:", "Password:"]
        if(scp_pass is None):
            answer = [scp_user, self._connection._play_context.password]
        else:
            answer = [scp_user, scp_pass]
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=False, check_all=True)

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'icx'
        reply = self.get(command='show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Version (\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1).strip(',')

        match = re.search(r'^Cisco (.+) \(revision', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        match = re.search(r'^(.+) uptime', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_device_operations(self):
        return {
            'supports_diff_replace': True,
            'supports_commit': False,
            'supports_rollback': False,
            'supports_defaults': True,
            'supports_onbox_diff': False,
            'supports_commit_comment': False,
            'supports_multiline_delimiter': True,
            'supports_diff_match': True,
            'supports_diff_ignore_lines': True,
            'supports_generate_diff': True,
            'supports_replace': False
        }

    def get_option_values(self):
        return {
            'format': ['text'],
            'diff_match': ['line', 'strict', 'exact', 'none'],
            'diff_replace': ['line', 'block'],
            'output': []
        }

    def get_capabilities(self):
        result = dict()
        result['rpc'] = self.get_base_rpc() + ['edit_banner', 'get_diff', 'run_commands', 'get_defaults_flag']
        result['network_api'] = 'cliconf'
        result['device_operations'] = self.get_device_operations()
        result.update(self.get_option_values())
        return json.dumps(result)

    def edit_banner(self, candidate=None, multiline_delimiter="@", commit=True):
        """
        Edit banner on remote device
        :param banners: Banners to be loaded in json format
        :param multiline_delimiter: Line delimiter for banner
        :param commit: Boolean value that indicates if the device candidate
                       configuration should be pushed in the running configuration or discarded.
        :param diff: Boolean flag to indicate if configuration that is applied on remote host should
                     generated and returned in response or not
        :return: Returns response of executing the configuration command received
                 from remote host
        """
        resp = {}
        banners_obj = json.loads(candidate)
        results = []
        requests = []
        if commit:
            for key, value in iteritems(banners_obj):
                key += ' %s' % multiline_delimiter
                self.send_command('config terminal', sendonly=True)
                for cmd in [key, value, multiline_delimiter]:
                    obj = {'command': cmd, 'sendonly': True}
                    results.append(self.send_command(**obj))
                    requests.append(cmd)

                self.send_command('end', sendonly=True)
                time.sleep(0.1)
                results.append(self.send_command('\n'))
                requests.append('\n')

        resp['request'] = requests
        resp['response'] = results

        return resp

    def run_commands(self, commands=None, check_rc=True):
        if commands is None:
            raise ValueError("'commands' value is required")

        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            output = cmd.pop('output', None)
            if output:
                raise ValueError("'output' value %s is not supported for run_commands" % output)

            try:
                out = self.send_command(**cmd)
            except AnsibleConnectionFailure as e:
                if check_rc:
                    raise
                out = getattr(e, 'err', to_text(e))

            responses.append(out)

        return responses

    def _extract_banners(self, config):
        banners = {}
        banner_cmds = re.findall(r'^banner (\w+)', config, re.M)
        for cmd in banner_cmds:
            regex = r'banner %s \$(.+?)(?=\$)' % cmd
            match = re.search(regex, config, re.S)
            if match:
                key = 'banner %s' % cmd
                banners[key] = match.group(1).strip()

        for cmd in banner_cmds:
            regex = r'banner %s \$(.+?)(?=\$)' % cmd
            match = re.search(regex, config, re.S)
            if match:
                config = config.replace(str(match.group(1)), '')

        config = re.sub(r'banner \w+ \$\$', '!! banner removed', config)
        return config, banners

    def _diff_banners(self, want, have):
        candidate = {}
        for key, value in iteritems(want):
            if value != have.get(key):
                candidate[key] = value
        return candidate
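The banner handling in _extract_banners() above relies on ICX banners being wrapped in '$' delimiters in the running configuration. A standalone run against a fabricated config snippet shows what ends up in the banner dict:

import re

config = 'banner motd $\nAuthorized access only\n$\ninterface ethernet 1/1/1\n'
banners = {}
for cmd in re.findall(r'^banner (\w+)', config, re.M):
    match = re.search(r'banner %s \$(.+?)(?=\$)' % cmd, config, re.S)
    if match:
        banners['banner %s' % cmd] = match.group(1).strip()
print(banners)  # -> {'banner motd': 'Authorized access only'}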
@ -1,95 +0,0 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: ironware
short_description: Use ironware cliconf to run command on Extreme Ironware platform
description:
  - This ironware plugin provides low level abstraction apis for
    sending and receiving CLI commands from Extreme Ironware network devices.
'''

import re
import json

from itertools import chain

from ansible.module_utils._text import to_bytes, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'ironware'
        reply = self.send_command('show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'IronWare : Version (\S+),', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'^(?:System Mode\:|System\:) (CES|CER|MLX|XMR)', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        return device_info

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running', 'startup'):
            raise ValueError("fetching configuration from %s is not supported" % source)

        if source == 'running':
            cmd = 'show running-config'
            if flags is not None:
                cmd += ' ' + ' '.join(flags)

        else:
            cmd = 'show configuration'
            if flags is not None:
                raise ValueError("flags are only supported with running-config")

        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, command):
        for cmd in chain(['configure terminal'], to_list(command), ['end']):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)

    def set_cli_prompt_context(self):
        """
        Make sure we are in the operational cli mode
        :return: None
        """
        if self._connection.connected:
            self._update_cli_prompt_context(config_context=')#')
@ -1,74 +0,0 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: netvisor
short_description: Use netvisor cliconf to run command on Pluribus netvisor platform
description:
  - This netvisor plugin provides low level abstraction apis for
    sending and receiving CLI commands from Pluribus netvisor devices.
'''

import json
from ansible.plugins.cliconf import CliconfBase


class Cliconf(CliconfBase):

    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running',):
            return self.invalid_params("fetching configuration from %s is not supported" % source)
        cmd = 'show running-config'
        return self.send_command(cmd)

    def edit_config(self, command):
        return

    def get(self, command=None, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
        if not command:
            raise ValueError('must provide value of command to execute')
        if output:
            raise ValueError("'output' value %s is not supported for get" % output)

        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_option_values(self):
        return {
            'format': ['text'],
            'diff_match': ['line', 'strict', 'exact', 'none'],
            'diff_replace': ['line', 'block'],
            'output': []
        }

    def get_capabilities(self):
        result = dict()
        result['rpc'] = self.get_base_rpc()
        result['network_api'] = 'cliconf'
        result['device_info'] = self.get_device_info()
        result.update(self.get_option_values())
        return json.dumps(result)

    def get_device_info(self):
        device_info = {}
        device_info['network_os'] = 'netvisor'

        return device_info
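get_capabilities() above serialises its result with json.dumps(), so callers receive a JSON string rather than a dict. A minimal sketch of decoding it, assuming conn is an already connected netvisor Cliconf instance (the variable and function names are hypothetical):

import json


def describe_device(conn):
    # conn is assumed to be a connected Cliconf instance; obtaining it is not shown here.
    caps = json.loads(conn.get_capabilities())
    print('network_api:', caps['network_api'])   # 'cliconf'
    print('rpc methods:', ', '.join(caps['rpc']))
    print('device info:', caps['device_info'])   # {'network_os': 'netvisor'}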
@ -1,112 +0,0 @@
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: nos
short_description: Use nos cliconf to run command on Extreme NOS platform
description:
  - This nos plugin provides low level abstraction apis for
    sending and receiving CLI commands from Extreme NOS network devices.
'''

import re
import json

from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'nos'
        reply = self.get('show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'Network Operating System Version: (\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        reply = self.get('show chassis')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'^Chassis Name:(\s+)(\S+)', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(2)

        reply = self.get('show running-config | inc "switch-attributes host-name"')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_config(self, source='running', flags=None):
        if source not in ('running',):
            raise ValueError("fetching configuration from %s is not supported" % source)
        if source == 'running':
            cmd = 'show running-config'

        flags = [] if flags is None else flags
        cmd += ' ' + ' '.join(flags)
        cmd = cmd.strip()

        return self.send_command(cmd)

    def edit_config(self, command):
        resp = {}
        results = []
        requests = []
        self.send_command('configure terminal')
        for cmd in to_list(command):
            if isinstance(cmd, dict):
                command = cmd['command']
                prompt = cmd['prompt']
                answer = cmd['answer']
                newline = cmd.get('newline', True)
            else:
                command = cmd
                prompt = None
                answer = None
                newline = True

            if cmd != 'end' and cmd[0] != '!':
                results.append(self.send_command(command, prompt, answer, False, newline))
                requests.append(cmd)

        self.send_command('end')

        resp['request'] = requests
        resp['response'] = results
        return resp

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)
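The regular expressions in get_device_info() above can be exercised without a device; the sample 'show version' and 'show chassis' output below is invented purely for illustration.

import re

sample_version = "Network Operating System Software\nNetwork Operating System Version: 7.2.0\n"
sample_chassis = "Chassis Name:        VDX6740-48\nswitchType:          131\n"

info = {'network_os': 'nos'}

match = re.search(r'Network Operating System Version: (\S+)', sample_version)
if match:
    info['network_os_version'] = match.group(1)

match = re.search(r'^Chassis Name:(\s+)(\S+)', sample_chassis, re.M)
if match:
    info['network_os_model'] = match.group(2)

print(info)  # {'network_os': 'nos', 'network_os_version': '7.2.0', 'network_os_model': 'VDX6740-48'}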
@ -1,77 +0,0 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: onyx
short_description: Use onyx cliconf to run command on Mellanox ONYX platform
description:
  - This onyx plugin provides low level abstraction apis for
    sending and receiving CLI commands from Mellanox ONYX network devices.
'''

import json

from itertools import chain

from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        reply = self.get('show version | json-print')
        data = json.loads(reply)
        device_info['network_os'] = data['Product name']
        device_info['network_os_version'] = data['Product release']
        device_info['network_os_version_summary'] = data['Version summary']
        device_info['network_os_model'] = data['Product model']

        reply = self.get('show hosts | include Hostname')
        data = to_text(reply, errors='surrogate_or_strict').strip()
        hostname = data.split(':')[1]
        hostname = hostname.strip()
        device_info['network_os_hostname'] = hostname

        return device_info

    @enable_mode
    def get_config(self, source='running', format='text', flags=None):
        if source not in ('running',):
            return self.invalid_params("fetching configuration from %s is not supported" % source)
        cmd = 'show running-config'
        return self.send_command(cmd)

    @enable_mode
    def edit_config(self, command):
        for cmd in chain(['configure terminal'], to_list(command), ['exit']):
            self.send_command(cmd)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)
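Because ONYX can print 'show version' as JSON, get_device_info() above uses json.loads() instead of regular expressions. The offline sketch below replays that step; the keys mirror the code, the values are made up for illustration.

import json

sample_reply = json.dumps({
    'Product name': 'MLNX-OS',
    'Product release': '3.8.2000',
    'Version summary': 'X86_64 3.8.2000 2019-05-05 20:00:00 x86_64',
    'Product model': 'x86onie',
})

data = json.loads(sample_reply)
device_info = {
    'network_os': data['Product name'],
    'network_os_version': data['Product release'],
    'network_os_version_summary': data['Version summary'],
    'network_os_model': data['Product model'],
}
print(device_info)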
@ -1,78 +0,0 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: routeros
short_description: Use routeros cliconf to run command on MikroTik RouterOS platform
description:
  - This routeros plugin provides low level abstraction apis for
    sending and receiving CLI commands from MikroTik RouterOS network devices.
'''

import re
import json

from itertools import chain

from ansible.module_utils._text import to_bytes, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}
        device_info['network_os'] = 'RouterOS'

        resource = self.get('/system resource print')
        data = to_text(resource, errors='surrogate_or_strict').strip()
        match = re.search(r'version: (\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        routerboard = self.get('/system routerboard print')
        data = to_text(routerboard, errors='surrogate_or_strict').strip()
        match = re.search(r'model: (.+)$', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        identity = self.get('/system identity print')
        data = to_text(identity, errors='surrogate_or_strict').strip()
        match = re.search(r'name: (.+)$', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_config(self, source='running', format='text', flags=None):
        return

    def edit_config(self, command):
        return

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)
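The RouterOS plugin identifies the device purely by scraping 'print' output. The snippet below replays the same regular expressions against invented sample text, so it can be run without a device.

import re

sample_resource = "  uptime: 1w2d3h\n  version: 6.45.9 (long-term)\n  cpu: Intel\n"
sample_identity = "  name: lab-router-01\n"

device_info = {'network_os': 'RouterOS'}

match = re.search(r'version: (\S+)', sample_resource)
if match:
    device_info['network_os_version'] = match.group(1)

match = re.search(r'name: (.+)$', sample_identity, re.M)
if match:
    device_info['network_os_hostname'] = match.group(1)

print(device_info)  # {'network_os': 'RouterOS', 'network_os_version': '6.45.9', 'network_os_hostname': 'lab-router-01'}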
@ -1,104 +0,0 @@
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: slxos
short_description: Use slxos cliconf to run command on Extreme SLX-OS platform
description:
  - This slxos plugin provides low level abstraction apis for
    sending and receiving CLI commands from Extreme SLX-OS network devices.
'''

import re
import json

from itertools import chain

from ansible.module_utils._text import to_bytes, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase


class Cliconf(CliconfBase):

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'slxos'
        reply = self.get('show version')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'SLX\-OS Operating System Version: (\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        reply = self.get('show chassis')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'^Chassis Name:(\s+)(\S+)', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(2)

        reply = self.get('show running-config | inc "switch-attributes host-name"')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_config(self, source='running', flags=None):
        if source not in ('running', 'startup'):
            raise ValueError("fetching configuration from %s is not supported" % source)
        if source == 'running':
            cmd = 'show running-config'
        else:
            cmd = 'show startup-config'

        flags = [] if flags is None else flags
        cmd += ' ' + ' '.join(flags)
        cmd = cmd.strip()

        return self.send_command(cmd)

    def edit_config(self, command):
        for cmd in chain(['configure terminal'], to_list(command), ['end']):
            if isinstance(cmd, dict):
                command = cmd['command']
                prompt = cmd['prompt']
                answer = cmd['answer']
                newline = cmd.get('newline', True)
            else:
                command = cmd
                prompt = None
                answer = None
                newline = True

            self.send_command(command, prompt, answer, False, newline)

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        return json.dumps(result)
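edit_config() above accepts either plain command strings or dicts that carry prompt/answer hints for interactive commands. The standalone helper below sketches that normalisation step; it is illustrative only and uses .get() where the plugin indexes the keys directly.

def normalize_command(cmd):
    # Dicts may carry prompt/answer hints for interactive commands;
    # plain strings fall back to the defaults used by send_command().
    if isinstance(cmd, dict):
        return cmd['command'], cmd.get('prompt'), cmd.get('answer'), cmd.get('newline', True)
    return cmd, None, None, True


print(normalize_command('vlan 10'))
print(normalize_command({'command': 'copy running-config startup-config', 'prompt': r'\[y/n\]', 'answer': 'y'}))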
@ -1,235 +0,0 @@
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
cliconf: voss
short_description: Use voss cliconf to run command on Extreme VOSS platform
description:
  - This voss plugin provides low level abstraction apis for
    sending and receiving CLI commands from Extreme VOSS network devices.
'''

import re
import json

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible_collections.community.general.plugins.module_utils.network.voss.voss import VossNetworkConfig
from ansible.plugins.cliconf import CliconfBase, enable_mode


class Cliconf(CliconfBase):

    @enable_mode
    def get_config(self, source='running', flags=None, format=None):
        if source not in ('running', 'startup'):
            raise ValueError("fetching configuration from %s is not supported" % source)

        if format:
            raise ValueError("'format' value %s is not supported for get_config" % format)

        if not flags:
            flags = []
        if source == 'running':
            cmd = 'show running-config '
            cmd += ' '.join(to_list(flags))
            cmd = cmd.strip()
        else:
            cmd = 'more /intflash/config.cfg'

        return self.send_command(cmd)

    def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
        """
        Generate diff between candidate and running configuration. If the
        remote host supports onbox diff capabilities (supports_onbox_diff),
        the candidate and running configurations do not need to be passed as
        arguments. If the onbox diff capability is not supported, the candidate
        argument is mandatory and the running argument is optional.
        :param candidate: The configuration which is expected to be present on remote host.
        :param running: The base configuration which is used to generate diff.
        :param diff_match: Instructs how to match the candidate configuration with current device configuration
                           Valid values are 'line', 'strict', 'exact', 'none'.
                           'line' - commands are matched line by line
                           'strict' - command lines are matched with respect to position
                           'exact' - command lines must be an equal match
                           'none' - will not compare the candidate configuration with the running configuration
        :param diff_ignore_lines: Use this argument to specify one or more lines that should be
                                  ignored during the diff. This is used for lines in the configuration
                                  that are automatically updated by the system. This argument takes
                                  a list of regular expressions or exact line matches.
        :param path: The ordered set of parents that uniquely identify the section or hierarchy
                     the commands should be checked against. If the parents argument
                     is omitted, the commands are checked against the set of top
                     level or global commands.
        :param diff_replace: Instructs on the way to perform the configuration on the device.
                             If the replace argument is set to I(line) then the modified lines are
                             pushed to the device in configuration mode. If the replace argument is
                             set to I(block) then the entire command block is pushed to the device in
                             configuration mode if any line is not correct.
        :return: Configuration diff in json format.
                 {
                     'config_diff': '',
                 }

        """
        diff = {}

        device_operations = self.get_device_operations()
        option_values = self.get_option_values()

        if candidate is None and device_operations['supports_generate_diff']:
            raise ValueError("candidate configuration is required to generate diff")

        if diff_match not in option_values['diff_match']:
            raise ValueError("'match' value %s is invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))

        if diff_replace not in option_values['diff_replace']:
            raise ValueError("'replace' value %s is invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))

        # prepare candidate configuration
        candidate_obj = VossNetworkConfig(indent=0, ignore_lines=diff_ignore_lines)
        candidate_obj.load(candidate)

        if running and diff_match != 'none':
            # running configuration
            running_obj = VossNetworkConfig(indent=0, contents=running, ignore_lines=diff_ignore_lines)
            configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)

        else:
            configdiffobjs = candidate_obj.items

        diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
        diff['diff_path'] = path
        diff['diff_replace'] = diff_replace
        return diff

    @enable_mode
    def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
        resp = {}
        operations = self.get_device_operations()
        self.check_edit_config_capability(operations, candidate, commit, replace, comment)

        results = []
        requests = []
        if commit:
            self.send_command('configure terminal')
            for line in to_list(candidate):
                if not isinstance(line, Mapping):
                    line = {'command': line}

                cmd = line['command']
                if cmd != 'end' and cmd[0] != '!':
                    results.append(self.send_command(**line))
                    requests.append(cmd)

            self.send_command('end')
        else:
            raise ValueError('check mode is not supported')

        resp['request'] = requests
        resp['response'] = results
        return resp

    def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
        return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)

    def get_device_info(self):
        device_info = {}

        device_info['network_os'] = 'voss'
        reply = self.get(command='show sys-info')
        data = to_text(reply, errors='surrogate_or_strict').strip()

        match = re.search(r'SysDescr\s+: \S+ \((\S+)\)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        match = re.search(r'Chassis\s+: (\S+)', data)
        if match:
            device_info['network_os_model'] = match.group(1)

        match = re.search(r'SysName\s+: (\S+)', data)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_device_operations(self):
        return {
            'supports_diff_replace': True,
            'supports_commit': False,
            'supports_rollback': False,
            'supports_defaults': True,
            'supports_onbox_diff': False,
            'supports_commit_comment': False,
            'supports_multiline_delimiter': False,
            'supports_diff_match': True,
            'supports_diff_ignore_lines': True,
            'supports_generate_diff': True,
            'supports_replace': False
        }

    def get_option_values(self):
        return {
            'format': ['text'],
            'diff_match': ['line', 'strict', 'exact', 'none'],
            'diff_replace': ['line', 'block'],
            'output': []
        }

    def get_capabilities(self):
        result = super(Cliconf, self).get_capabilities()
        result['rpc'] += ['get_diff', 'run_commands', 'get_defaults_flag']
        result['device_operations'] = self.get_device_operations()
        result.update(self.get_option_values())
        return json.dumps(result)

    def run_commands(self, commands=None, check_rc=True):
        if commands is None:
            raise ValueError("'commands' value is required")

        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}

            output = cmd.pop('output', None)
            if output:
                raise ValueError("'output' value %s is not supported for run_commands" % output)

            try:
                out = self.send_command(**cmd)
            except AnsibleConnectionFailure as e:
                if check_rc:
                    raise
                out = getattr(e, 'err', e)

            responses.append(out)

        return responses

    def get_defaults_flag(self):
        return 'verbose'
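A rough sketch of how a config module would typically chain the voss plugin's get_config(), get_diff() and edit_config(); the cliconf variable and the candidate text are hypothetical, and error handling is omitted.

def apply_candidate(cliconf, candidate):
    # cliconf is assumed to be a connected voss Cliconf instance and
    # candidate a string of configuration lines to enforce.
    running = cliconf.get_config(source='running')
    diff = cliconf.get_diff(candidate=candidate, running=running,
                            diff_match='line', diff_replace='line')
    commands = diff['config_diff'].split('\n') if diff['config_diff'] else []
    if commands:
        return cliconf.edit_config(candidate=commands)
    return {'request': [], 'response': []}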
@ -1,45 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2016, John Barker <jobarker@redhat.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Hostname or IP of the A10 Networks device.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- An account with administrator privileges.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
aliases: [ admin, user ]
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Password for the C(username) account.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
aliases: [ pass, pwd ]
|
|
||||||
write_config:
|
|
||||||
description:
|
|
||||||
- If C(yes), any changes will cause a write of the running configuration
|
|
||||||
to non-volatile memory. This will save I(all) configuration changes,
|
|
||||||
including those that may have been made manually or through other modules,
|
|
||||||
so care should be taken when specifying C(yes).
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- If C(no), SSL certificates will not be validated.
|
|
||||||
- This should only be used on personally controlled devices using self-signed certificates.
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
notes:
|
|
||||||
- Requires A10 Networks aXAPI 2.1.
|
|
||||||
'''
|
|
|
@ -1,55 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2017, James Mighion <@jmighion>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote device over the specified transport.
|
|
||||||
- The value of host is used as the destination address for the transport.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Specifies the port to use when building the connection to the remote device.
|
|
||||||
type: int
|
|
||||||
default: 22
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to the remote device.
|
|
||||||
- This value is used to authenticate the SSH session.
|
|
||||||
- If the value is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_USERNAME) will be used instead.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to the remote device.
|
|
||||||
- This value is used to authenticate the SSH session.
|
|
||||||
- If the value is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_PASSWORD) will be used instead.
|
|
||||||
type: str
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- Specifies the timeout in seconds for communicating with the network device
|
|
||||||
for either connecting or sending commands.
|
|
||||||
- If the timeout is exceeded before the operation is completed, the module will error.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
ssh_keyfile:
|
|
||||||
description:
|
|
||||||
- Specifies the SSH key to use to authenticate the connection to the remote device.
|
|
||||||
- This value is the path to the key used to authenticate the SSH session.
|
|
||||||
- If the value is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
|
|
||||||
type: path
|
|
||||||
'''
|
|
|
@ -1,58 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2017, James Mighion <@jmighion>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote
|
|
||||||
device over the specified transport. The value of host is used as
|
|
||||||
the destination address for the transport.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Specifies the port to use when building the connection to the remote device.
|
|
||||||
type: int
|
|
||||||
default: 22
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
|
|
||||||
type: str
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- Specifies the timeout in seconds for communicating with the network device
|
|
||||||
for either connecting or sending commands. If the timeout is
|
|
||||||
exceeded before the operation is completed, the module will error.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
ssh_keyfile:
|
|
||||||
description:
|
|
||||||
- Specifies the SSH key to use to authenticate the connection to
|
|
||||||
the remote device. This value is the path to the
|
|
||||||
key used to authenticate the SSH session. If the value is not specified
|
|
||||||
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
|
|
||||||
will be used instead.
|
|
||||||
type: path
|
|
||||||
'''
|
|
|
@ -1,96 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Created on December 12, 2016
|
|
||||||
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
|
|
||||||
# Avi Version: 16.3.4
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
# Avi common documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
controller:
|
|
||||||
description:
|
|
||||||
- IP address or hostname of the controller. The default value is the environment variable C(AVI_CONTROLLER).
|
|
||||||
type: str
|
|
||||||
default: ''
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Username used for accessing Avi controller. The default value is the environment variable C(AVI_USERNAME).
|
|
||||||
type: str
|
|
||||||
default: ''
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Password of Avi user in Avi controller. The default value is the environment variable C(AVI_PASSWORD).
|
|
||||||
type: str
|
|
||||||
default: ''
|
|
||||||
tenant:
|
|
||||||
description:
|
|
||||||
- Name of tenant used for all Avi API calls and context of object.
|
|
||||||
type: str
|
|
||||||
default: admin
|
|
||||||
tenant_uuid:
|
|
||||||
description:
|
|
||||||
- UUID of tenant used for all Avi API calls and context of object.
|
|
||||||
type: str
|
|
||||||
default: ''
|
|
||||||
api_version:
|
|
||||||
description:
|
|
||||||
- Avi API version of to use for Avi API and objects.
|
|
||||||
type: str
|
|
||||||
default: 16.4.4
|
|
||||||
avi_credentials:
|
|
||||||
description:
|
|
||||||
- Avi Credentials dictionary which can be used in lieu of enumerating Avi Controller login details.
|
|
||||||
suboptions:
|
|
||||||
controller:
|
|
||||||
description:
|
|
||||||
- Avi controller IP or FQDN
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Avi controller username
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Avi controller password
|
|
||||||
api_version:
|
|
||||||
description:
|
|
||||||
- Avi controller version
|
|
||||||
default: 16.4.4
|
|
||||||
tenant:
|
|
||||||
description:
|
|
||||||
- Avi controller tenant
|
|
||||||
default: admin
|
|
||||||
tenant_uuid:
|
|
||||||
description:
|
|
||||||
- Avi controller tenant UUID
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Avi controller port
|
|
||||||
token:
|
|
||||||
description:
|
|
||||||
- Avi controller API token
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- Avi controller request timeout
|
|
||||||
default: 300
|
|
||||||
session_id:
|
|
||||||
description:
|
|
||||||
- Avi controller API session id to reuse existing session with csrftoken
|
|
||||||
csrftoken:
|
|
||||||
description:
|
|
||||||
- Avi controller API csrftoken to reuse existing session with session id
|
|
||||||
type: dict
|
|
||||||
api_context:
|
|
||||||
description:
|
|
||||||
- Avi API context that includes current session ID and CSRF Token.
|
|
||||||
- This allows user to perform single login and re-use the session.
|
|
||||||
type: dict
|
|
||||||
avi_disable_session_cache_as_fact:
|
|
||||||
description:
|
|
||||||
- It disables avi session information to be cached as a fact.
|
|
||||||
type: bool
|
|
||||||
|
|
||||||
notes:
|
|
||||||
- For more information on using Ansible to manage Avi Network devices see U(https://www.ansible.com/ansible-avi-networks).
|
|
||||||
'''
|
|
|
@ -1,60 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote
|
|
||||||
device over the specified transport. The value of host is used as
|
|
||||||
the destination address for the transport.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Specifies the port to use when building the connection to the remote
|
|
||||||
device. This value applies to either I(cli) or I(netconf). The port
|
|
||||||
value will default to the appropriate transport common port if
|
|
||||||
none is provided in the task. (cli=22, netconf=22).
|
|
||||||
type: int
|
|
||||||
default: 0 (use common port)
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate the CLI login.
|
|
||||||
If the value is not specified in the task, the value of environment
|
|
||||||
variable C(ANSIBLE_NET_USERNAME) will be used instead.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to
|
|
||||||
the remote device. This is a common argument used for cli
|
|
||||||
transports. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
|
|
||||||
type: str
|
|
||||||
ssh_keyfile:
|
|
||||||
description:
|
|
||||||
- Specifies the SSH key to use to authenticate the connection to
|
|
||||||
the remote device. This argument is used for the I(cli)
|
|
||||||
transport. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
|
|
||||||
type: path
|
|
||||||
transport:
|
|
||||||
description:
|
|
||||||
- Configures the transport connection to use when connecting to the
|
|
||||||
remote device. The transport argument supports connectivity to the
|
|
||||||
device over cli (ssh).
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
choices: [ cli, netconf ]
|
|
||||||
default: cli
|
|
||||||
'''
|
|
|
@ -1,78 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2017, Lenovo, Inc.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
# Standard CNOS documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
outputfile:
|
|
||||||
description:
|
|
||||||
- This specifies the file path where the output of each command
|
|
||||||
execution is saved. Each command that is specified in the merged
|
|
||||||
template file and each response from the device are saved here.
|
|
||||||
Usually the location is the results folder, but you can
|
|
||||||
choose another location based on your write permission.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- This is the variable used to search the hosts file at
|
|
||||||
/etc/ansible/hosts and identify the IP address of the device on
|
|
||||||
which the template is going to be applied. Usually the Ansible
|
|
||||||
keyword {{ inventory_hostname }} is specified in the playbook as
|
|
||||||
an abstraction of the group of network elements that need to be
|
|
||||||
configured.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username used to authenticate the connection to
|
|
||||||
the remote device. The value of the username parameter is used to
|
|
||||||
authenticate the SSH session. While generally the value should
|
|
||||||
come from the inventory file, you can also specify it as a
|
|
||||||
variable. This parameter is optional. If it is not specified, no
|
|
||||||
default value will be used.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Configures the password used to authenticate the connection to
|
|
||||||
the remote device. The value of the password parameter is used to
|
|
||||||
authenticate the SSH session. While generally the value should
|
|
||||||
come from the inventory file, you can also specify it as a
|
|
||||||
variable. This parameter is optional. If it is not specified, no
|
|
||||||
default value will be used.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
enablePassword:
|
|
||||||
description:
|
|
||||||
- Configures the password used to enter Global Configuration
|
|
||||||
command mode on the switch. If the switch does not request this
|
|
||||||
password, the parameter is ignored. While generally the value
|
|
||||||
should come from the inventory file, you can also specify it as a
|
|
||||||
variable. This parameter is optional. If it is not specified,
|
|
||||||
no default value will be used.
|
|
||||||
type: str
|
|
||||||
deviceType:
|
|
||||||
description:
|
|
||||||
- This specifies the type of device where the method is executed.
|
|
||||||
The choices NE1072T, NE1032, NE1032T, NE10032, NE2572 are added
|
|
||||||
since Ansible 2.4. The choice NE0152T is added since Ansible 2.8.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
choices:
|
|
||||||
- g8272_cnos
|
|
||||||
- g8296_cnos
|
|
||||||
- g8332_cnos
|
|
||||||
- NE0152T
|
|
||||||
- NE1072T
|
|
||||||
- NE1032
|
|
||||||
- NE1032T
|
|
||||||
- NE10032
|
|
||||||
- NE2572
|
|
||||||
notes:
|
|
||||||
- For more information on using Ansible to manage Lenovo Network devices see U(https://www.ansible.com/ansible-lenovo).
|
|
||||||
'''
|
|
|
@ -1,90 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2017, Red Hat Inc.
|
|
||||||
# Copyright: (c) 2017, Lenovo.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
authorize:
|
|
||||||
description:
|
|
||||||
- Instructs the module to enter privileged mode on the remote device
|
|
||||||
before sending any commands. If not specified, the device will
|
|
||||||
attempt to execute all commands in non-privileged mode. If the value
|
|
||||||
is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
auth_pass:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use if required to enter privileged mode
|
|
||||||
on the remote device. If I(authorize) is false, then this argument
|
|
||||||
does nothing. If the value is not specified in the task, the value of
|
|
||||||
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote
|
|
||||||
device over the specified transport. The value of host is used as
|
|
||||||
the destination address for the transport.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Specifies the port to use when building the connection to the remote device.
|
|
||||||
type: int
|
|
||||||
default: 22
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
|
|
||||||
type: str
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- Specifies the timeout in seconds for communicating with the network device
|
|
||||||
for either connecting or sending commands. If the timeout is
|
|
||||||
exceeded before the operation is completed, the module will error.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
ssh_keyfile:
|
|
||||||
description:
|
|
||||||
- Specifies the SSH key to use to authenticate the connection to
|
|
||||||
the remote device. This value is the path to the
|
|
||||||
key used to authenticate the SSH session. If the value is not specified
|
|
||||||
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
|
|
||||||
will be used instead.
|
|
||||||
type: path
|
|
||||||
authorize:
|
|
||||||
description:
|
|
||||||
- Instructs the module to enter privileged mode on the remote device
|
|
||||||
before sending any commands. If not specified, the device will
|
|
||||||
attempt to execute all commands in non-privileged mode. If the value
|
|
||||||
is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
auth_pass:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use if required to enter privileged mode
|
|
||||||
on the remote device. If I(authorize) is false, then this argument
|
|
||||||
does nothing. If the value is not specified in the task, the value of
|
|
||||||
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
|
|
||||||
type: str
|
|
||||||
'''
|
|
|
@ -1,61 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2018, Ingate Systems AB
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
client:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
suboptions:
|
|
||||||
version:
|
|
||||||
description:
|
|
||||||
- REST API version.
|
|
||||||
type: str
|
|
||||||
choices: [ v1 ]
|
|
||||||
default: v1
|
|
||||||
scheme:
|
|
||||||
description:
|
|
||||||
- Which HTTP protocol to use.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
choices: [ http, https ]
|
|
||||||
address:
|
|
||||||
description:
|
|
||||||
- The hostname or IP address to the unit.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- The username of the REST API user.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- The password for the REST API user.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Which HTTP(S) port to connect to.
|
|
||||||
type: int
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- The timeout (in seconds) for REST API requests.
|
|
||||||
type: int
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- Verify the unit's HTTPS certificate.
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
aliases: [ verify_ssl ]
|
|
||||||
notes:
|
|
||||||
- This module requires that the Ingate Python SDK is installed on the
|
|
||||||
host. To install the SDK use the pip command from your shell
|
|
||||||
C(pip install ingatesdk).
|
|
||||||
requirements:
|
|
||||||
- ingatesdk >= 1.0.6
|
|
||||||
'''
|
|
|
@ -1,93 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2017, Paul Baker <@paulquack>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
authorize:
|
|
||||||
description:
|
|
||||||
- B(Deprecated)
|
|
||||||
- "Starting with Ansible 2.7 we recommend using C(connection: network_cli) and C(become: yes)."
|
|
||||||
- For more information please see the L(IronWare Platform Options guide, ../network/user_guide/platform_ironware.html).
|
|
||||||
- HORIZONTALLINE
|
|
||||||
- Instructs the module to enter privileged mode on the remote device
|
|
||||||
before sending any commands. If not specified, the device will
|
|
||||||
attempt to execute all commands in non-privileged mode. If the value
|
|
||||||
is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- B(Deprecated)
|
|
||||||
- "Starting with Ansible 2.7 we recommend using C(connection: network_cli) and C(become: yes)."
|
|
||||||
- For more information please see the L(IronWare Platform Options guide, ../network/user_guide/platform_ironware.html).
|
|
||||||
- HORIZONTALLINE
|
|
||||||
- A dict object containing connection details.
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote
|
|
||||||
device over the specified transport. The value of host is used as
|
|
||||||
the destination address for the transport.
|
|
||||||
type: str
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Specifies the port to use when building the connection to the remote
|
|
||||||
device.
|
|
||||||
type: int
|
|
||||||
default: 22
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
|
|
||||||
type: str
|
|
||||||
ssh_keyfile:
|
|
||||||
description:
|
|
||||||
- Specifies the SSH key to use to authenticate the connection to
|
|
||||||
the remote device. This value is the path to the
|
|
||||||
key used to authenticate the SSH session. If the value is not specified
|
|
||||||
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
|
|
||||||
will be used instead.
|
|
||||||
type: path
|
|
||||||
authorize:
|
|
||||||
description:
|
|
||||||
- Instructs the module to enter privileged mode on the remote device
|
|
||||||
before sending any commands. If not specified, the device will
|
|
||||||
attempt to execute all commands in non-privileged mode. If the value
|
|
||||||
is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
auth_pass:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use if required to enter privileged mode
|
|
||||||
on the remote device. If I(authorize) is false, then this argument
|
|
||||||
does nothing. If the value is not specified in the task, the value of
|
|
||||||
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
|
|
||||||
type: str
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- Specifies the idle timeout for the connection, in seconds. Useful
|
|
||||||
if the console freezes before continuing. For example when saving
|
|
||||||
configurations.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
notes:
|
|
||||||
- For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide <network_guide>`
|
|
||||||
'''
|
|
|
@ -1,65 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
|
|
||||||
options:
|
|
||||||
nsip:
|
|
||||||
description:
|
|
||||||
- The ip address of the netscaler appliance where the nitro API calls will be made.
|
|
||||||
- "The port can be specified with the colon (:). E.g. 192.168.1.1:555."
|
|
||||||
type: str
|
|
||||||
required: True
|
|
||||||
|
|
||||||
nitro_user:
|
|
||||||
description:
|
|
||||||
- The username with which to authenticate to the netscaler node.
|
|
||||||
type: str
|
|
||||||
required: True
|
|
||||||
|
|
||||||
nitro_pass:
|
|
||||||
description:
|
|
||||||
- The password with which to authenticate to the netscaler node.
|
|
||||||
type: str
|
|
||||||
required: True
|
|
||||||
|
|
||||||
nitro_protocol:
|
|
||||||
description:
|
|
||||||
- Which protocol to use when accessing the nitro API objects.
|
|
||||||
type: str
|
|
||||||
choices: [ http, https ]
|
|
||||||
default: http
|
|
||||||
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
|
|
||||||
nitro_timeout:
|
|
||||||
description:
|
|
||||||
- Time in seconds until a timeout error is thrown when establishing a new session with Netscaler.
|
|
||||||
type: float
|
|
||||||
default: 310
|
|
||||||
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- The state of the resource being configured by the module on the netscaler node.
|
|
||||||
- When present the resource will be created if needed and configured according to the module's parameters.
|
|
||||||
- When absent the resource will be deleted from the netscaler node.
|
|
||||||
type: str
|
|
||||||
choices: [ absent, present ]
|
|
||||||
default: present
|
|
||||||
|
|
||||||
save_config:
|
|
||||||
description:
|
|
||||||
- If C(yes) the module will save the configuration on the netscaler node if it makes any changes.
|
|
||||||
- The module will not save the configuration on the netscaler node if it made no changes.
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
notes:
|
|
||||||
- For more information on using Ansible to manage Citrix NetScaler Network devices see U(https://www.ansible.com/ansible-netscaler).
|
|
||||||
'''
|
|
|
@ -1,33 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2017, Cisco and/or its affiliates.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
url:
|
|
||||||
description: NSO JSON-RPC URL, for example C(http://localhost:8080/jsonrpc)
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
username:
|
|
||||||
description: NSO username
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
password:
|
|
||||||
description: NSO password
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
timeout:
|
|
||||||
description: JSON-RPC request timeout in seconds
|
|
||||||
type: int
|
|
||||||
default: 300
|
|
||||||
validate_certs:
|
|
||||||
description: When set to true, validates the SSL certificate of NSO when
|
|
||||||
using SSL
|
|
||||||
type: bool
|
|
||||||
required: false
|
|
||||||
default: false
|
|
||||||
'''
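# Minimal sketch of a raw JSON-RPC login built from the options documented above.
# This is illustrative only, not the deleted NSO module_utils code; the 'login'
# method name and its 'user'/'passwd' parameters are assumptions about NSO's
# JSON-RPC API rather than something stated in this fragment.
import json
import ssl
from urllib.request import Request, urlopen


def nso_jsonrpc_login(url, username, password, timeout=300, validate_certs=False):
    payload = {
        'jsonrpc': '2.0',
        'id': 1,
        'method': 'login',
        'params': {'user': username, 'passwd': password},
    }
    context = ssl.create_default_context()
    if not validate_certs:
        # Mirrors validate_certs=false: skip certificate verification.
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    request = Request(url, data=json.dumps(payload).encode('utf-8'),
                      headers={'Content-Type': 'application/json'})
    with urlopen(request, timeout=timeout, context=context) as response:
        return json.loads(response.read().decode('utf-8'))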
|
|
|
@ -1,73 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote
|
|
||||||
device over the specified transport. The value of host is used as
|
|
||||||
the destination address for the transport.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Specifies the port to use when building the connection to the remote device.
|
|
||||||
type: int
|
|
||||||
default: 22
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
|
|
||||||
type: str
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- Specifies the timeout in seconds for communicating with the network device
|
|
||||||
for either connecting or sending commands. If the timeout is
|
|
||||||
exceeded before the operation is completed, the module will error.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
ssh_keyfile:
|
|
||||||
description:
|
|
||||||
- Specifies the SSH key to use to authenticate the connection to
|
|
||||||
the remote device. This value is the path to the
|
|
||||||
key used to authenticate the SSH session. If the value is not specified
|
|
||||||
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
|
|
||||||
will be used instead.
|
|
||||||
type: path
|
|
||||||
authorize:
|
|
||||||
description:
|
|
||||||
- Instructs the module to enter privileged mode on the remote device
|
|
||||||
before sending any commands. If not specified, the device will
|
|
||||||
attempt to execute all commands in non-privileged mode. If the value
|
|
||||||
is not specified in the task, the value of environment variable
|
|
||||||
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
auth_pass:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use if required to enter privileged mode
|
|
||||||
on the remote device. If I(authorize) is false, then this argument
|
|
||||||
does nothing. If the value is not specified in the task, the value of
|
|
||||||
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
|
|
||||||
type: str
|
|
||||||
'''
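# Sketch (hypothetical helper, not original code) of how the environment-variable
# fallbacks described above can be resolved for a provider dict: task-supplied
# values win, the documented ANSIBLE_NET_* variables fill the gaps, and the
# documented defaults apply last.
import os

_ENV_FALLBACKS = {
    'username': 'ANSIBLE_NET_USERNAME',
    'password': 'ANSIBLE_NET_PASSWORD',
    'ssh_keyfile': 'ANSIBLE_NET_SSH_KEYFILE',
    'authorize': 'ANSIBLE_NET_AUTHORIZE',
    'auth_pass': 'ANSIBLE_NET_AUTH_PASS',
}


def resolve_provider(provider):
    resolved = dict(provider or {})
    for key, env_name in _ENV_FALLBACKS.items():
        if resolved.get(key) is None and env_name in os.environ:
            resolved[key] = os.environ[env_name]
    resolved.setdefault('port', 22)
    resolved.setdefault('timeout', 10)
    resolved.setdefault('authorize', False)
    return resolved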
|
|
|
@ -1,245 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
|
|
||||||
# Copyright: (c) 2018, Kevin Breit (@kbreit)
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
ip_address:
|
|
||||||
description:
|
|
||||||
- IP address (or hostname) of PAN-OS device.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Password for authentication.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Username for authentication.
|
|
||||||
type: str
|
|
||||||
default: admin
|
|
||||||
'''
|
|
||||||
|
|
||||||
PROVIDER = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
version_added: '2.8'
|
|
||||||
required: true
|
|
||||||
suboptions:
|
|
||||||
ip_address:
|
|
||||||
description:
|
|
||||||
- The IP address or hostname of the PAN-OS device being configured.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- The username to use for authentication. This is ignored if
|
|
||||||
I(api_key) is specified.
|
|
||||||
type: str
|
|
||||||
default: 'admin'
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- The password to use for authentication. This is ignored if
|
|
||||||
I(api_key) is specified.
|
|
||||||
type: str
|
|
||||||
api_key:
|
|
||||||
description:
|
|
||||||
- The API key to use instead of generating it using
|
|
||||||
I(username) / I(password).
|
|
||||||
type: str
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- The port number to connect to the PAN-OS device on.
|
|
||||||
type: int
|
|
||||||
default: 443
|
|
||||||
serial_number:
|
|
||||||
description:
|
|
||||||
- The serial number of a firewall to use for targeted commands.
|
|
||||||
If I(ip_address) is not a Panorama PAN-OS device, then
|
|
||||||
this param is ignored.
|
|
||||||
type: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
TRANSITIONAL_PROVIDER = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
version_added: '2.8'
|
|
||||||
suboptions:
|
|
||||||
ip_address:
|
|
||||||
description:
|
|
||||||
- The IP address or hostname of the PAN-OS device being configured.
|
|
||||||
type: str
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- The username to use for authentication. This is ignored if
|
|
||||||
I(api_key) is specified.
|
|
||||||
type: str
|
|
||||||
default: 'admin'
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- The password to use for authentication. This is ignored if
|
|
||||||
I(api_key) is specified.
|
|
||||||
type: str
|
|
||||||
api_key:
|
|
||||||
description:
|
|
||||||
- The API key to use instead of generating it using
|
|
||||||
I(username) / I(password).
|
|
||||||
type: str
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- The port number to connect to the PAN-OS device on.
|
|
||||||
type: int
|
|
||||||
default: 443
|
|
||||||
serial_number:
|
|
||||||
description:
|
|
||||||
- The serial number of a firewall to use for targeted commands.
|
|
||||||
If I(ip_address) is not a Panorama PAN-OS device, then
|
|
||||||
this param is ignored.
|
|
||||||
type: str
|
|
||||||
ip_address:
|
|
||||||
description:
|
|
||||||
- B(Deprecated)
|
|
||||||
- Use I(provider) to specify PAN-OS connectivity instead.
|
|
||||||
- HORIZONTALLINE
|
|
||||||
- The IP address or hostname of the PAN-OS device being configured.
|
|
||||||
type: str
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- B(Deprecated)
|
|
||||||
- Use I(provider) to specify PAN-OS connectivity instead.
|
|
||||||
- HORIZONTALLINE
|
|
||||||
- The username to use for authentication. This is ignored if
|
|
||||||
I(api_key) is specified.
|
|
||||||
type: str
|
|
||||||
default: 'admin'
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- B(Deprecated)
|
|
||||||
- Use I(provider) to specify PAN-OS connectivity instead.
|
|
||||||
- HORIZONTALLINE
|
|
||||||
- The password to use for authentication. This is ignored if
|
|
||||||
I(api_key) is specified.
|
|
||||||
type: str
|
|
||||||
api_key:
|
|
||||||
description:
|
|
||||||
- B(Deprecated)
|
|
||||||
- Use I(provider) to specify PAN-OS connectivity instead.
|
|
||||||
- HORIZONTALLINE
|
|
||||||
- The API key to use instead of generating it using
|
|
||||||
I(username) / I(password).
|
|
||||||
type: str
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- B(Deprecated)
|
|
||||||
- Use I(provider) to specify PAN-OS connectivity instead.
|
|
||||||
- HORIZONTALLINE
|
|
||||||
- The port number to connect to the PAN-OS device on.
|
|
||||||
type: int
|
|
||||||
default: 443
|
|
||||||
notes:
|
|
||||||
- PAN-OS connectivity should be specified using I(provider) or the
|
|
||||||
classic PAN-OS connectivity params (I(ip_address), I(username),
|
|
||||||
I(password), I(api_key), and I(port)). If both are present, then the
|
|
||||||
classic params are ignored.
|
|
||||||
'''
|
|
||||||
|
|
||||||
STATE = r'''
|
|
||||||
options:
|
|
||||||
state:
|
|
||||||
description:
|
|
||||||
- The state.
|
|
||||||
type: str
|
|
||||||
default: present
|
|
||||||
choices:
|
|
||||||
- present
|
|
||||||
- absent
|
|
||||||
'''
|
|
||||||
|
|
||||||
RULEBASE = r'''
|
|
||||||
options:
|
|
||||||
rulebase:
|
|
||||||
description:
|
|
||||||
- The rulebase in which the rule is to exist. If left unspecified,
|
|
||||||
this defaults to I(rulebase=pre-rulebase) for Panorama. For
|
|
||||||
NGFW, this is always set to I(rulebase=rulebase).
|
|
||||||
type: str
|
|
||||||
choices:
|
|
||||||
- pre-rulebase
|
|
||||||
- rulebase
|
|
||||||
- post-rulebase
|
|
||||||
'''
|
|
||||||
|
|
||||||
VSYS_DG = r'''
|
|
||||||
options:
|
|
||||||
vsys_dg:
|
|
||||||
description:
|
|
||||||
- The vsys (for NGFW) or device group (for Panorama) this
|
|
||||||
operation should target. If left unspecified, this defaults to
|
|
||||||
I(vsys_dg=vsys1) for NGFW or I(vsys_dg=shared) for Panorama.
|
|
||||||
type: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
DEVICE_GROUP = r'''
|
|
||||||
options:
|
|
||||||
device_group:
|
|
||||||
description:
|
|
||||||
- (Panorama only) The device group the operation should target.
|
|
||||||
type: str
|
|
||||||
default: shared
|
|
||||||
'''
|
|
||||||
|
|
||||||
VSYS_IMPORT = r'''
|
|
||||||
options:
|
|
||||||
vsys:
|
|
||||||
description:
|
|
||||||
- The vsys this object should be imported into. Objects that are
|
|
||||||
imported include interfaces, virtual routers, virtual wires, and
|
|
||||||
VLANs. Interfaces are typically imported into vsys1 if no vsys
|
|
||||||
is specified.
|
|
||||||
type: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
VSYS = r'''
|
|
||||||
options:
|
|
||||||
vsys:
|
|
||||||
description:
|
|
||||||
- The vsys this object belongs to.
|
|
||||||
type: str
|
|
||||||
default: vsys1
|
|
||||||
'''
|
|
||||||
|
|
||||||
TEMPLATE_ONLY = r'''
|
|
||||||
options:
|
|
||||||
template:
|
|
||||||
description:
|
|
||||||
- (Panorama only) The template this operation should target. This
|
|
||||||
param is required if the PAN-OS device is Panorama.
|
|
||||||
type: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
FULL_TEMPLATE_SUPPORT = r'''
|
|
||||||
options:
|
|
||||||
template:
|
|
||||||
description:
|
|
||||||
- (Panorama only) The template this operation should target.
|
|
||||||
Mutually exclusive with I(template_stack).
|
|
||||||
type: str
|
|
||||||
template_stack:
|
|
||||||
description:
|
|
||||||
- (Panorama only) The template stack this operation should target.
|
|
||||||
Mutually exclusive with I(template).
|
|
||||||
type: str
|
|
||||||
notes:
|
|
||||||
- If the PAN-OS to be configured is Panorama, either I(template) or
|
|
||||||
I(template_stack) must be specified.
|
|
||||||
'''
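# Hypothetical helper (not the PAN-OS module_utils API) enforcing the rules from
# the notes above: template and template_stack are mutually exclusive, and one of
# them is required when the target is Panorama.
def resolve_panorama_template(is_panorama, template=None, template_stack=None):
    if template and template_stack:
        raise ValueError('template and template_stack are mutually exclusive')
    if is_panorama and not (template or template_stack):
        raise ValueError('Panorama requires either template or template_stack')
    return template or template_stack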
|
|
|
@ -1,61 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
provider:
|
|
||||||
description:
|
|
||||||
- A dict object containing connection details.
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Specifies the DNS host name or address for connecting to the remote
|
|
||||||
device over the specified transport. The value of host is used as
|
|
||||||
the destination address for the transport.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Specifies the port to use when building the connection to the remote
|
|
||||||
device.
|
|
||||||
type: int
|
|
||||||
default: 22
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Configures the username to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Specifies the password to use to authenticate the connection to
|
|
||||||
the remote device. This value is used to authenticate
|
|
||||||
the SSH session. If the value is not specified in the task, the
|
|
||||||
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
|
|
||||||
type: str
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- Specifies the timeout in seconds for communicating with the network device
|
|
||||||
for either connecting or sending commands. If the timeout is
|
|
||||||
exceeded before the operation is completed, the module will error.
|
|
||||||
type: int
|
|
||||||
default: 10
|
|
||||||
ssh_keyfile:
|
|
||||||
description:
|
|
||||||
- Specifies the SSH key to use to authenticate the connection to
|
|
||||||
the remote device. This value is the path to the
|
|
||||||
key used to authenticate the SSH session. If the value is not specified
|
|
||||||
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
|
|
||||||
will be used instead.
|
|
||||||
type: path
|
|
||||||
notes:
|
|
||||||
- For more information on using Ansible to manage Nokia SR OS Network devices see U(https://www.ansible.com/ansible-nokia).
|
|
||||||
'''
|
|
|
@ -1,252 +0,0 @@
|
||||||
# Copyright (c) 2019 Extreme Networks.
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
author:
|
|
||||||
- "Ujwal Komarla (@ujwalkomarla)"
|
|
||||||
httpapi: exos
|
|
||||||
short_description: Use EXOS REST APIs to communicate with EXOS platform
|
|
||||||
description:
|
|
||||||
- This plugin provides low-level abstraction APIs to send REST API
|
|
||||||
requests to EXOS network devices and receive JSON responses.
|
|
||||||
'''
|
|
||||||
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
from ansible.module_utils.connection import ConnectionError
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
|
|
||||||
from ansible.plugins.httpapi import HttpApiBase
|
|
||||||
import ansible.module_utils.six.moves.http_cookiejar as cookiejar
|
|
||||||
from ansible.module_utils.common._collections_compat import Mapping
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
|
|
||||||
|
|
||||||
|
|
||||||
class HttpApi(HttpApiBase):
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
super(HttpApi, self).__init__(*args, **kwargs)
|
|
||||||
self._device_info = None
|
|
||||||
self._auth_token = cookiejar.CookieJar()
|
|
||||||
|
|
||||||
def login(self, username, password):
|
|
||||||
auth_path = '/auth/token'
|
|
||||||
credentials = {'username': username, 'password': password}
|
|
||||||
self.send_request(path=auth_path, data=json.dumps(credentials), method='POST')
|
|
||||||
|
|
||||||
def logout(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def handle_httperror(self, exc):
|
|
||||||
return False
|
|
||||||
|
|
||||||
def send_request(self, path, data=None, method='GET', **message_kwargs):
|
|
||||||
headers = {'Content-Type': 'application/json'}
|
|
||||||
response, response_data = self.connection.send(path, data, method=method, cookies=self._auth_token, headers=headers, **message_kwargs)
|
|
||||||
try:
|
|
||||||
if response.status == 204:
|
|
||||||
response_data = {}
|
|
||||||
else:
|
|
||||||
response_data = json.loads(to_text(response_data.getvalue()))
|
|
||||||
except ValueError:
|
|
||||||
raise ConnectionError('Response was not valid JSON, got {0}'.format(
|
|
||||||
to_text(response_data.getvalue())
|
|
||||||
))
|
|
||||||
return response_data
|
|
||||||
|
|
||||||
def run_commands(self, commands, check_rc=True):
|
|
||||||
if commands is None:
|
|
||||||
raise ValueError("'commands' value is required")
|
|
||||||
|
|
||||||
headers = {'Content-Type': 'application/json'}
|
|
||||||
responses = list()
|
|
||||||
for cmd in to_list(commands):
|
|
||||||
if not isinstance(cmd, Mapping):
|
|
||||||
cmd = {'command': cmd}
|
|
||||||
|
|
||||||
cmd['command'] = strip_run_script_cli2json(cmd['command'])
|
|
||||||
|
|
||||||
output = cmd.pop('output', None)
|
|
||||||
if output and output not in self.get_option_values().get('output'):
|
|
||||||
raise ValueError("'output' value is %s is invalid. Valid values are %s" % (output, ','.join(self.get_option_values().get('output'))))
|
|
||||||
|
|
||||||
data = request_builder(cmd['command'])
|
|
||||||
|
|
||||||
response, response_data = self.connection.send('/jsonrpc', data, cookies=self._auth_token, headers=headers, method='POST')
|
|
||||||
try:
|
|
||||||
response_data = json.loads(to_text(response_data.getvalue()))
|
|
||||||
except ValueError:
|
|
||||||
raise ConnectionError('Response was not valid JSON, got {0}'.format(
|
|
||||||
to_text(response_data.getvalue())
|
|
||||||
))
|
|
||||||
|
|
||||||
if response_data.get('error', None):
|
|
||||||
raise ConnectionError("Request Error, got {0}".format(response_data['error']))
|
|
||||||
if not response_data.get('result', None):
|
|
||||||
raise ConnectionError("Request Error, got {0}".format(response_data))
|
|
||||||
|
|
||||||
response_data = response_data['result']
|
|
||||||
|
|
||||||
if output and output == 'text':
|
|
||||||
statusOut = getKeyInResponse(response_data, 'status')
|
|
||||||
cliOut = getKeyInResponse(response_data, 'CLIoutput')
|
|
||||||
if statusOut == "ERROR":
|
|
||||||
raise ConnectionError("Command error({1}) for request {0}".format(cmd['command'], cliOut))
|
|
||||||
if cliOut is None:
|
|
||||||
raise ValueError("Response for request {0} doesn't have the CLIoutput field, got {1}".format(cmd['command'], response_data))
|
|
||||||
response_data = cliOut
|
|
||||||
|
|
||||||
responses.append(response_data)
|
|
||||||
return responses
|
|
||||||
|
|
||||||
def get_device_info(self):
|
|
||||||
device_info = {}
|
|
||||||
device_info['network_os'] = 'exos'
|
|
||||||
|
|
||||||
reply = self.run_commands({'command': 'show switch detail', 'output': 'text'})
|
|
||||||
data = to_text(reply, errors='surrogate_or_strict').strip()
|
|
||||||
|
|
||||||
match = re.search(r'ExtremeXOS version (\S+)', data)
|
|
||||||
if match:
|
|
||||||
device_info['network_os_version'] = match.group(1)
|
|
||||||
|
|
||||||
match = re.search(r'System Type: +(\S+)', data)
|
|
||||||
if match:
|
|
||||||
device_info['network_os_model'] = match.group(1)
|
|
||||||
|
|
||||||
match = re.search(r'SysName: +(\S+)', data)
|
|
||||||
if match:
|
|
||||||
device_info['network_os_hostname'] = match.group(1)
|
|
||||||
|
|
||||||
return device_info
|
|
||||||
|
|
||||||
def get_device_operations(self):
|
|
||||||
return {
|
|
||||||
'supports_diff_replace': False,  # identify if merging or replacing the config is supported
|
|
||||||
'supports_commit': False, # identify if commit is supported by device or not
|
|
||||||
'supports_rollback': False, # identify if rollback is supported or not
|
|
||||||
'supports_defaults': True,  # identify if fetching running config with defaults is supported
|
|
||||||
'supports_commit_comment': False,  # identify if adding a comment to a commit is supported or not
|
|
||||||
'supports_onbox_diff': False, # identify if on box diff capability is supported or not
|
|
||||||
'supports_generate_diff': True, # identify if diff capability is supported within plugin
|
|
||||||
'supports_multiline_delimiter': False,  # identify if a multiline delimiter is supported within config
|
|
||||||
'supports_diff_match': True, # identify if match is supported
|
|
||||||
'supports_diff_ignore_lines': True, # identify if ignore line in diff is supported
|
|
||||||
'supports_config_replace': False, # identify if running config replace with candidate config is supported
|
|
||||||
'supports_admin': False, # identify if admin configure mode is supported or not
|
|
||||||
'supports_commit_label': False # identify if commit label is supported or not
|
|
||||||
}
|
|
||||||
|
|
||||||
def get_option_values(self):
|
|
||||||
return {
|
|
||||||
'format': ['text', 'json'],
|
|
||||||
'diff_match': ['line', 'strict', 'exact', 'none'],
|
|
||||||
'diff_replace': ['line', 'block'],
|
|
||||||
'output': ['text', 'json']
|
|
||||||
}
|
|
||||||
|
|
||||||
def get_capabilities(self):
|
|
||||||
result = {}
|
|
||||||
result['rpc'] = ['get_default_flag', 'run_commands', 'get_config', 'send_request', 'get_capabilities', 'get_diff']
|
|
||||||
result['device_info'] = self.get_device_info()
|
|
||||||
result['device_operations'] = self.get_device_operations()
|
|
||||||
result.update(self.get_option_values())
|
|
||||||
result['network_api'] = 'exosapi'
|
|
||||||
return json.dumps(result)
|
|
||||||
|
|
||||||
def get_default_flag(self):
|
|
||||||
# The flag to modify the command to collect configuration with defaults
|
|
||||||
return 'detail'
|
|
||||||
|
|
||||||
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
|
|
||||||
diff = {}
|
|
||||||
device_operations = self.get_device_operations()
|
|
||||||
option_values = self.get_option_values()
|
|
||||||
|
|
||||||
if candidate is None and device_operations['supports_generate_diff']:
|
|
||||||
raise ValueError("candidate configuration is required to generate diff")
|
|
||||||
|
|
||||||
if diff_match not in option_values['diff_match']:
|
|
||||||
raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))
|
|
||||||
|
|
||||||
if diff_replace not in option_values['diff_replace']:
|
|
||||||
raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))
|
|
||||||
|
|
||||||
# prepare candidate configuration
|
|
||||||
candidate_obj = NetworkConfig(indent=1)
|
|
||||||
candidate_obj.load(candidate)
|
|
||||||
|
|
||||||
if running and diff_match != 'none' and diff_replace != 'config':
|
|
||||||
# running configuration
|
|
||||||
running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines)
|
|
||||||
configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
|
|
||||||
|
|
||||||
else:
|
|
||||||
configdiffobjs = candidate_obj.items
|
|
||||||
|
|
||||||
diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
|
|
||||||
return diff
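# Illustrative use of get_diff() above (variable names are hypothetical):
#
#   diff = httpapi.get_diff(candidate=candidate_config, running=running_config,
#                           diff_match='line', diff_replace='line')
#   # diff['config_diff'] contains only the candidate lines missing from the
#   # running configuration, or '' when there is nothing to change.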
|
|
||||||
|
|
||||||
def get_config(self, source='running', format='text', flags=None):
|
|
||||||
options_values = self.get_option_values()
|
|
||||||
if format not in options_values['format']:
|
|
||||||
raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(options_values['format'])))
|
|
||||||
|
|
||||||
lookup = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'}
|
|
||||||
if source not in lookup:
|
|
||||||
raise ValueError("fetching configuration from %s is not supported" % source)
|
|
||||||
|
|
||||||
cmd = {'command': lookup[source], 'output': 'text'}
|
|
||||||
|
|
||||||
if source == 'startup':
|
|
||||||
reply = self.run_commands({'command': 'show switch', 'format': 'text'})
|
|
||||||
data = to_text(reply, errors='surrogate_or_strict').strip()
|
|
||||||
match = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE)
|
|
||||||
if match:
|
|
||||||
cmd['command'] += match.group(1)
|
|
||||||
else:
|
|
||||||
# No Startup(/Selected) Config
|
|
||||||
return {}
|
|
||||||
|
|
||||||
cmd['command'] += ' '.join(to_list(flags))
|
|
||||||
cmd['command'] = cmd['command'].strip()
|
|
||||||
|
|
||||||
return self.run_commands(cmd)[0]
|
|
||||||
|
|
||||||
|
|
||||||
def request_builder(command, reqid=""):
|
|
||||||
return json.dumps(dict(jsonrpc='2.0', id=reqid, method='cli', params=to_list(command)))
|
|
||||||
|
|
||||||
|
|
||||||
def strip_run_script_cli2json(command):
|
|
||||||
if to_text(command, errors="surrogate_then_replace").startswith('run script cli2json.py'):
|
|
||||||
command = str(command).replace('run script cli2json.py', '')
|
|
||||||
return command
|
|
||||||
|
|
||||||
|
|
||||||
def getKeyInResponse(response, key):
|
|
||||||
keyOut = None
|
|
||||||
for item in response:
|
|
||||||
if key in item:
|
|
||||||
keyOut = item[key]
|
|
||||||
break
|
|
||||||
return keyOut
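# Illustrative example of the JSON-RPC envelope produced by request_builder()
# above for a single CLI command (key order follows the dict construction):
#
#   request_builder('show ports', reqid=1)
#   # -> '{"jsonrpc": "2.0", "id": 1, "method": "cli", "params": ["show ports"]}'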
|
|
|
@ -1,453 +0,0 @@
|
||||||
# Copyright (c) 2018 Fortinet and/or its affiliates.
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
author:
|
|
||||||
- Luke Weighall (@lweighall)
|
|
||||||
- Andrew Welsh (@Ghilli3)
|
|
||||||
- Jim Huber (@p4r4n0y1ng)
|
|
||||||
httpapi : fortianalyzer
|
|
||||||
short_description: HttpApi Plugin for Fortinet FortiAnalyzer Appliance or VM.
|
|
||||||
description:
|
|
||||||
- This HttpApi plugin provides methods to connect to Fortinet FortiAnalyzer Appliance or VM via JSON RPC API.
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
import json
|
|
||||||
from ansible.plugins.httpapi import HttpApiBase
|
|
||||||
from ansible.module_utils.basic import to_text
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import BASE_HEADERS
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZBaseException
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZCommon
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZMethods
|
|
||||||
|
|
||||||
|
|
||||||
class HttpApi(HttpApiBase):
|
|
||||||
def __init__(self, connection):
|
|
||||||
super(HttpApi, self).__init__(connection)
|
|
||||||
self._req_id = 0
|
|
||||||
self._sid = None
|
|
||||||
self._url = "/jsonrpc"
|
|
||||||
self._host = None
|
|
||||||
self._tools = FAZCommon
|
|
||||||
self._debug = False
|
|
||||||
self._connected_faz = None
|
|
||||||
self._last_response_msg = None
|
|
||||||
self._last_response_code = None
|
|
||||||
self._last_data_payload = None
|
|
||||||
self._last_url = None
|
|
||||||
self._last_response_raw = None
|
|
||||||
self._locked_adom_list = list()
|
|
||||||
self._locked_adoms_by_user = list()
|
|
||||||
self._uses_workspace = False
|
|
||||||
self._uses_adoms = False
|
|
||||||
self._adom_list = list()
|
|
||||||
self._logged_in_user = None
|
|
||||||
|
|
||||||
def set_become(self, become_context):
|
|
||||||
"""
|
|
||||||
ELEVATION IS NOT REQUIRED ON FORTINET DEVICES - SKIPPED
|
|
||||||
:param become_context: Unused input.
|
|
||||||
:return: None
|
|
||||||
"""
|
|
||||||
return None
|
|
||||||
|
|
||||||
def update_auth(self, response, response_data):
|
|
||||||
"""
|
|
||||||
TOKENS ARE NOT USED SO NO NEED TO UPDATE AUTH
|
|
||||||
:param response: Unused input.
|
|
||||||
:param response_data: Unused input.
|
|
||||||
:return: None
|
|
||||||
"""
|
|
||||||
return None
|
|
||||||
|
|
||||||
def login(self, username, password):
|
|
||||||
"""
|
|
||||||
This function will log the plugin into FortiAnalyzer, and return the results.
|
|
||||||
:param username: Username of FortiAnalyzer Admin
|
|
||||||
:param password: Password of FortiAnalyzer Admin
|
|
||||||
|
|
||||||
:return: Dictionary of status if it logged in or not.
|
|
||||||
"""
|
|
||||||
|
|
||||||
self._logged_in_user = username
|
|
||||||
self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, "sys/login/user",
|
|
||||||
passwd=password, user=username,))
|
|
||||||
|
|
||||||
if "FortiAnalyzer object connected to FortiAnalyzer" in self.__str__():
|
|
||||||
# If login worked, then inspect the FortiAnalyzer for Workspace Mode and its system information.
|
|
||||||
self.inspect_faz()
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
raise FAZBaseException(msg="Unknown error while logging in...connection was lost during login operation..."
|
|
||||||
" Exiting")
|
|
||||||
|
|
||||||
def inspect_faz(self):
|
|
||||||
# CHECK FOR WORKSPACE MODE TO SEE IF WE HAVE TO ENABLE ADOM LOCKS
|
|
||||||
status = self.get_system_status()
|
|
||||||
if status[0] == -11:
|
|
||||||
# THE CONNECTION GOT LOST SOMEHOW, REMOVE THE SID AND REPORT BAD LOGIN
|
|
||||||
self.logout()
|
|
||||||
raise FAZBaseException(msg="Error -11 -- the Session ID was likely malformed somehow. Contact authors."
|
|
||||||
" Exiting")
|
|
||||||
elif status[0] == 0:
|
|
||||||
try:
|
|
||||||
self.check_mode()
|
|
||||||
if self._uses_adoms:
|
|
||||||
self.get_adom_list()
|
|
||||||
if self._uses_workspace:
|
|
||||||
self.get_locked_adom_list()
|
|
||||||
self._connected_faz = status[1]
|
|
||||||
self._host = self._connected_faz["Hostname"]
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
return
|
|
||||||
|
|
||||||
def logout(self):
|
|
||||||
"""
|
|
||||||
This function will logout of the FortiAnalyzer.
|
|
||||||
"""
|
|
||||||
if self.sid is not None:
|
|
||||||
# IF WE WERE USING WORKSPACES, THEN CLEAN UP OUR LOCKS IF THEY STILL EXIST
|
|
||||||
if self.uses_workspace:
|
|
||||||
self.get_lock_info()
|
|
||||||
self.run_unlock()
|
|
||||||
ret_code, response = self.send_request(FAZMethods.EXEC,
|
|
||||||
self._tools.format_request(FAZMethods.EXEC, "sys/logout"))
|
|
||||||
self.sid = None
|
|
||||||
return ret_code, response
|
|
||||||
|
|
||||||
def send_request(self, method, params):
|
|
||||||
"""
|
|
||||||
Responsible for actual sending of data to the connection httpapi base plugin. Does some formatting as well.
|
|
||||||
:param params: A formatted dictionary that was returned by self.common_datagram_params()
|
|
||||||
before being called here.
|
|
||||||
:param method: The preferred API Request method (GET, ADD, POST, etc....)
|
|
||||||
:type method: basestring
|
|
||||||
|
|
||||||
:return: Dictionary of status if it logged in or not.
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
if self.sid is None and params[0]["url"] != "sys/login/user":
|
|
||||||
try:
|
|
||||||
self.connection._connect()
|
|
||||||
except Exception as err:
|
|
||||||
raise FAZBaseException(
|
|
||||||
msg="An problem happened with the httpapi plugin self-init connection process. "
|
|
||||||
"Error: " + to_text(err))
|
|
||||||
except IndexError:
|
|
||||||
raise FAZBaseException("An attempt was made at communicating with a FAZ with "
|
|
||||||
"no valid session and an incorrectly formatted request.")
|
|
||||||
except Exception:
|
|
||||||
raise FAZBaseException("An attempt was made at communicating with a FAZ with "
|
|
||||||
"no valid session and an unexpected error was discovered.")
|
|
||||||
|
|
||||||
self._update_request_id()
|
|
||||||
json_request = {
|
|
||||||
"method": method,
|
|
||||||
"params": params,
|
|
||||||
"session": self.sid,
|
|
||||||
"id": self.req_id,
|
|
||||||
"verbose": 1
|
|
||||||
}
|
|
||||||
data = json.dumps(json_request, ensure_ascii=False).replace('\\\\', '\\')
|
|
||||||
try:
|
|
||||||
# Sending URL and Data in Unicode, per Ansible Specifications for Connection Plugins
|
|
||||||
response, response_data = self.connection.send(path=to_text(self._url), data=to_text(data),
|
|
||||||
headers=BASE_HEADERS)
|
|
||||||
# Get Unicode Response - Must convert from StringIO to unicode first so we can do a replace function below
|
|
||||||
result = json.loads(to_text(response_data.getvalue()))
|
|
||||||
self._update_self_from_response(result, self._url, data)
|
|
||||||
return self._handle_response(result)
|
|
||||||
except Exception as err:
|
|
||||||
raise FAZBaseException(err)
|
|
||||||
|
|
||||||
def _handle_response(self, response):
|
|
||||||
self._set_sid(response)
|
|
||||||
if isinstance(response["result"], list):
|
|
||||||
result = response["result"][0]
|
|
||||||
else:
|
|
||||||
result = response["result"]
|
|
||||||
if "data" in result:
|
|
||||||
return result["status"]["code"], result["data"]
|
|
||||||
else:
|
|
||||||
return result["status"]["code"], result
|
|
||||||
|
|
||||||
def _update_self_from_response(self, response, url, data):
|
|
||||||
self._last_response_raw = response
|
|
||||||
if isinstance(response["result"], list):
|
|
||||||
result = response["result"][0]
|
|
||||||
else:
|
|
||||||
result = response["result"]
|
|
||||||
if "status" in result:
|
|
||||||
self._last_response_code = result["status"]["code"]
|
|
||||||
self._last_response_msg = result["status"]["message"]
|
|
||||||
self._last_url = url
|
|
||||||
self._last_data_payload = data
|
|
||||||
|
|
||||||
def _set_sid(self, response):
|
|
||||||
if self.sid is None and "session" in response:
|
|
||||||
self.sid = response["session"]
|
|
||||||
|
|
||||||
def return_connected_faz(self):
|
|
||||||
"""
|
|
||||||
Returns the data stored under self._connected_faz
|
|
||||||
|
|
||||||
:return: dict
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if self._connected_faz:
|
|
||||||
return self._connected_faz
|
|
||||||
except Exception:
|
|
||||||
raise FAZBaseException("Couldn't Retrieve Connected FAZ Stats")
|
|
||||||
|
|
||||||
def get_system_status(self):
|
|
||||||
"""
|
|
||||||
Returns the system status page from the FortiAnalyzer, for logging and other uses.
|
|
||||||
return: status
|
|
||||||
"""
|
|
||||||
status = self.send_request(FAZMethods.GET, self._tools.format_request(FAZMethods.GET, "sys/status"))
|
|
||||||
return status
|
|
||||||
|
|
||||||
@property
|
|
||||||
def debug(self):
|
|
||||||
return self._debug
|
|
||||||
|
|
||||||
@debug.setter
|
|
||||||
def debug(self, val):
|
|
||||||
self._debug = val
|
|
||||||
|
|
||||||
@property
|
|
||||||
def req_id(self):
|
|
||||||
return self._req_id
|
|
||||||
|
|
||||||
@req_id.setter
|
|
||||||
def req_id(self, val):
|
|
||||||
self._req_id = val
|
|
||||||
|
|
||||||
def _update_request_id(self, reqid=0):
|
|
||||||
self.req_id = reqid if reqid != 0 else self.req_id + 1
|
|
||||||
|
|
||||||
@property
|
|
||||||
def sid(self):
|
|
||||||
return self._sid
|
|
||||||
|
|
||||||
@sid.setter
|
|
||||||
def sid(self, val):
|
|
||||||
self._sid = val
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
if self.sid is not None and self.connection._url is not None:
|
|
||||||
return "FortiAnalyzer object connected to FortiAnalyzer: " + to_text(self.connection._url)
|
|
||||||
return "FortiAnalyzer object with no valid connection to a FortiAnalyzer appliance."
|
|
||||||
|
|
||||||
##################################
|
|
||||||
# BEGIN DATABASE LOCK CONTEXT CODE
|
|
||||||
##################################
|
|
||||||
|
|
||||||
@property
|
|
||||||
def uses_workspace(self):
|
|
||||||
return self._uses_workspace
|
|
||||||
|
|
||||||
@uses_workspace.setter
|
|
||||||
def uses_workspace(self, val):
|
|
||||||
self._uses_workspace = val
|
|
||||||
|
|
||||||
@property
|
|
||||||
def uses_adoms(self):
|
|
||||||
return self._uses_adoms
|
|
||||||
|
|
||||||
@uses_adoms.setter
|
|
||||||
def uses_adoms(self, val):
|
|
||||||
self._uses_adoms = val
|
|
||||||
|
|
||||||
def add_adom_to_lock_list(self, adom):
|
|
||||||
if adom not in self._locked_adom_list:
|
|
||||||
self._locked_adom_list.append(adom)
|
|
||||||
|
|
||||||
def remove_adom_from_lock_list(self, adom):
|
|
||||||
if adom in self._locked_adom_list:
|
|
||||||
self._locked_adom_list.remove(adom)
|
|
||||||
|
|
||||||
def check_mode(self):
|
|
||||||
"""
|
|
||||||
Checks FortiAnalyzer for the use of Workspace mode
|
|
||||||
"""
|
|
||||||
url = "/cli/global/system/global"
|
|
||||||
code, resp_obj = self.send_request(FAZMethods.GET,
|
|
||||||
self._tools.format_request(FAZMethods.GET,
|
|
||||||
url,
|
|
||||||
fields=["workspace-mode", "adom-status"]))
|
|
||||||
try:
|
|
||||||
if resp_obj["workspace-mode"] == "workflow":
|
|
||||||
self.uses_workspace = True
|
|
||||||
elif resp_obj["workspace-mode"] == "disabled":
|
|
||||||
self.uses_workspace = False
|
|
||||||
except KeyError:
|
|
||||||
self.uses_workspace = False
|
|
||||||
except Exception:
|
|
||||||
raise FAZBaseException(msg="Couldn't determine workspace-mode in the plugin")
|
|
||||||
try:
|
|
||||||
if resp_obj["adom-status"] in [1, "enable"]:
|
|
||||||
self.uses_adoms = True
|
|
||||||
else:
|
|
||||||
self.uses_adoms = False
|
|
||||||
except KeyError:
|
|
||||||
self.uses_adoms = False
|
|
||||||
except Exception:
|
|
||||||
raise FAZBaseException(msg="Couldn't determine adom-status in the plugin")
|
|
||||||
|
|
||||||
def run_unlock(self):
|
|
||||||
"""
|
|
||||||
Checks for ADOM status, if locked, it will unlock
|
|
||||||
"""
|
|
||||||
for adom_locked in self._locked_adoms_by_user:
|
|
||||||
adom = adom_locked["adom"]
|
|
||||||
self.unlock_adom(adom)
|
|
||||||
|
|
||||||
def lock_adom(self, adom=None, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
Locks an ADOM for changes
|
|
||||||
"""
|
|
||||||
if adom:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/lock/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom)
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/root/workspace/lock"
|
|
||||||
code, respobj = self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, url))
|
|
||||||
if code == 0 and respobj["status"]["message"].lower() == "ok":
|
|
||||||
self.add_adom_to_lock_list(adom)
|
|
||||||
return code, respobj
|
|
||||||
|
|
||||||
def unlock_adom(self, adom=None, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
Unlocks an ADOM after changes
|
|
||||||
"""
|
|
||||||
if adom:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/unlock/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom)
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/root/workspace/unlock"
|
|
||||||
code, respobj = self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, url))
|
|
||||||
if code == 0 and respobj["status"]["message"].lower() == "ok":
|
|
||||||
self.remove_adom_from_lock_list(adom)
|
|
||||||
return code, respobj
|
|
||||||
|
|
||||||
def commit_changes(self, adom=None, aux=False, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
Commits changes to an ADOM
|
|
||||||
"""
|
|
||||||
if adom:
|
|
||||||
if aux:
|
|
||||||
url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom)
|
|
||||||
else:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/commit/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom)
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/root/workspace/commit"
|
|
||||||
return self.send_request(FAZMethods.EXEC, self._tools.format_request(FAZMethods.EXEC, url))
|
|
||||||
|
|
||||||
def get_lock_info(self, adom=None):
|
|
||||||
"""
|
|
||||||
Gets ADOM lock info so it can be displayed with the error messages. If an ADOM is determined to be locked by Ansible
|
|
||||||
for some reason, it can then be unlocked.
|
|
||||||
"""
|
|
||||||
if not adom or adom == "root":
|
|
||||||
url = "/dvmdb/adom/root/workspace/lockinfo"
|
|
||||||
else:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/lockinfo/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/lockinfo/".format(adom=adom)
|
|
||||||
datagram = {}
|
|
||||||
data = self._tools.format_request(FAZMethods.GET, url, **datagram)
|
|
||||||
resp_obj = self.send_request(FAZMethods.GET, data)
|
|
||||||
code = resp_obj[0]
|
|
||||||
if code != 0:
|
|
||||||
self._module.fail_json(msg=("An error occurred trying to get the ADOM Lock Info. Error: " + to_text(resp_obj)))
|
|
||||||
elif code == 0:
|
|
||||||
try:
|
|
||||||
if resp_obj[1]["status"]["message"] == "OK":
|
|
||||||
self._lock_info = None
|
|
||||||
except Exception:
|
|
||||||
self._lock_info = resp_obj[1]
|
|
||||||
return resp_obj
|
|
||||||
|
|
||||||
def get_adom_list(self):
|
|
||||||
"""
|
|
||||||
Gets the list of ADOMs for the FortiAnalyzer
|
|
||||||
"""
|
|
||||||
if self.uses_adoms:
|
|
||||||
url = "/dvmdb/adom"
|
|
||||||
datagram = {}
|
|
||||||
data = self._tools.format_request(FAZMethods.GET, url, **datagram)
|
|
||||||
resp_obj = self.send_request(FAZMethods.GET, data)
|
|
||||||
code = resp_obj[0]
|
|
||||||
if code != 0:
|
|
||||||
self._module.fail_json(msg=("An error occurred trying to get the ADOM Info. Error: " + to_text(resp_obj)))
|
|
||||||
elif code == 0:
|
|
||||||
num_of_adoms = len(resp_obj[1])
|
|
||||||
append_list = ['root', ]
|
|
||||||
for adom in resp_obj[1]:
|
|
||||||
if adom["tab_status"] != "":
|
|
||||||
append_list.append(to_text(adom["name"]))
|
|
||||||
self._adom_list = append_list
|
|
||||||
return resp_obj
|
|
||||||
|
|
||||||
def get_locked_adom_list(self):
|
|
||||||
"""
|
|
||||||
Gets the list of locked adoms
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
locked_list = list()
|
|
||||||
locked_by_user_list = list()
|
|
||||||
for adom in self._adom_list:
|
|
||||||
adom_lock_info = self.get_lock_info(adom=adom)
|
|
||||||
try:
|
|
||||||
if adom_lock_info[1]["status"]["message"] == "OK":
|
|
||||||
continue
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
try:
|
|
||||||
if adom_lock_info[1][0]["lock_user"]:
|
|
||||||
locked_list.append(to_text(adom))
|
|
||||||
if adom_lock_info[1][0]["lock_user"] == self._logged_in_user:
|
|
||||||
locked_by_user_list.append({"adom": to_text(adom), "user": to_text(adom_lock_info[1][0]["lock_user"])})
|
|
||||||
except Exception as err:
|
|
||||||
raise FAZBaseException(err)
|
|
||||||
self._locked_adom_list = locked_list
|
|
||||||
self._locked_adoms_by_user = locked_by_user_list
|
|
||||||
|
|
||||||
except Exception as err:
|
|
||||||
raise FAZBaseException(msg=("An error occurred while trying to get the locked adom list. Error: "
|
|
||||||
+ to_text(err)))
|
|
||||||
|
|
||||||
#################################
|
|
||||||
# END DATABASE LOCK CONTEXT CODE
|
|
||||||
#################################
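# Illustrative workflow (not part of the original plugin) for the ADOM lock
# handling above when workspace mode is enabled; 'httpapi' stands for a
# logged-in HttpApi instance and 'root' is just an example ADOM name:
#
#   code, resp = httpapi.lock_adom(adom='root')    # take the workspace lock
#   ...                                            # apply configuration changes
#   httpapi.commit_changes(adom='root')            # commit the workspace
#   httpapi.unlock_adom(adom='root')               # release the lock
#
# logout() releases any locks still held by the logged-in user automatically
# via get_lock_info() and run_unlock() above.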
|
|
|
@ -1,451 +0,0 @@
|
||||||
# Copyright (c) 2018 Fortinet and/or its affiliates.
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
author:
|
|
||||||
- Luke Weighall (@lweighall)
|
|
||||||
- Andrew Welsh (@Ghilli3)
|
|
||||||
- Jim Huber (@p4r4n0y1ng)
|
|
||||||
httpapi : fortimanager
|
|
||||||
short_description: HttpApi Plugin for Fortinet FortiManager Appliance or VM.
|
|
||||||
description:
|
|
||||||
- This HttpApi plugin provides methods to connect to Fortinet FortiManager Appliance or VM via JSON RPC API.
|
|
||||||
'''
|
|
||||||
|
|
||||||
import json
|
|
||||||
from ansible.plugins.httpapi import HttpApiBase
|
|
||||||
from ansible.module_utils.basic import to_text
|
|
||||||
from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import BASE_HEADERS
|
|
||||||
from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGBaseException
|
|
||||||
from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRCommon
|
|
||||||
from ansible_collections.fortinet.fortios.plugins.module_utils.network.fortimanager.common import FMGRMethods
|
|
||||||
|
|
||||||
|
|
||||||
class HttpApi(HttpApiBase):
|
|
||||||
def __init__(self, connection):
|
|
||||||
super(HttpApi, self).__init__(connection)
|
|
||||||
self._req_id = 0
|
|
||||||
self._sid = None
|
|
||||||
self._url = "/jsonrpc"
|
|
||||||
self._host = None
|
|
||||||
self._tools = FMGRCommon
|
|
||||||
self._debug = False
|
|
||||||
self._connected_fmgr = None
|
|
||||||
self._last_response_msg = None
|
|
||||||
self._last_response_code = None
|
|
||||||
self._last_data_payload = None
|
|
||||||
self._last_url = None
|
|
||||||
self._last_response_raw = None
|
|
||||||
self._locked_adom_list = list()
|
|
||||||
self._locked_adoms_by_user = list()
|
|
||||||
self._uses_workspace = False
|
|
||||||
self._uses_adoms = False
|
|
||||||
self._adom_list = list()
|
|
||||||
self._logged_in_user = None
|
|
||||||
|
|
||||||
def set_become(self, become_context):
|
|
||||||
"""
|
|
||||||
ELEVATION IS NOT REQUIRED ON FORTINET DEVICES - SKIPPED.
|
|
||||||
:param become_context: Unused input.
|
|
||||||
:return: None
|
|
||||||
"""
|
|
||||||
return None
|
|
||||||
|
|
||||||
def update_auth(self, response, response_data):
|
|
||||||
"""
|
|
||||||
TOKENS ARE NOT USED SO NO NEED TO UPDATE AUTH.
|
|
||||||
:param response: Unused input.
|
|
||||||
:param response_data: Unused input.
|
|
||||||
:return: None
|
|
||||||
"""
|
|
||||||
return None
|
|
||||||
|
|
||||||
def login(self, username, password):
|
|
||||||
|
|
||||||
"""
|
|
||||||
This function will log the plugin into FortiManager, and return the results.
|
|
||||||
:param username: Username of FortiManager Admin
|
|
||||||
:param password: Password of FortiManager Admin
|
|
||||||
|
|
||||||
:return: Dictionary of status if it logged in or not.
|
|
||||||
"""
|
|
||||||
self._logged_in_user = username
|
|
||||||
self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, "sys/login/user",
|
|
||||||
passwd=password, user=username, ))
|
|
||||||
|
|
||||||
if "FortiManager object connected to FortiManager" in self.__str__():
|
|
||||||
# If login worked, then inspect the FortiManager for Workspace Mode and its system information.
|
|
||||||
self.inspect_fmgr()
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
raise FMGBaseException(msg="Unknown error while logging in...connection was lost during login operation...."
|
|
||||||
" Exiting")
|
|
||||||
|
|
||||||
def inspect_fmgr(self):
|
|
||||||
# CHECK FOR WORKSPACE MODE TO SEE IF WE HAVE TO ENABLE ADOM LOCKS
|
|
||||||
status = self.get_system_status()
|
|
||||||
if status[0] == -11:
|
|
||||||
# THE CONNECTION GOT LOST SOMEHOW, REMOVE THE SID AND REPORT BAD LOGIN
|
|
||||||
self.logout()
|
|
||||||
raise FMGBaseException(msg="Error -11 -- the Session ID was likely malformed somehow. Contact authors."
|
|
||||||
" Exiting")
|
|
||||||
elif status[0] == 0:
|
|
||||||
try:
|
|
||||||
self.check_mode()
|
|
||||||
if self._uses_adoms:
|
|
||||||
self.get_adom_list()
|
|
||||||
if self._uses_workspace:
|
|
||||||
self.get_locked_adom_list()
|
|
||||||
self._connected_fmgr = status[1]
|
|
||||||
self._host = self._connected_fmgr["Hostname"]
|
|
||||||
except BaseException:
|
|
||||||
pass
|
|
||||||
return
|
|
||||||
|
|
||||||
def logout(self):
|
|
||||||
"""
|
|
||||||
This function will logout of the FortiManager.
|
|
||||||
"""
|
|
||||||
if self.sid is not None:
|
|
||||||
# IF WE WERE USING WORKSPACES, THEN CLEAN UP OUR LOCKS IF THEY STILL EXIST
|
|
||||||
if self.uses_workspace:
|
|
||||||
self.get_lock_info()
|
|
||||||
self.run_unlock()
|
|
||||||
ret_code, response = self.send_request(FMGRMethods.EXEC,
|
|
||||||
self._tools.format_request(FMGRMethods.EXEC, "sys/logout"))
|
|
||||||
self.sid = None
|
|
||||||
return ret_code, response
|
|
||||||
|
|
||||||
def send_request(self, method, params):
|
|
||||||
"""
|
|
||||||
Responsible for actual sending of data to the connection httpapi base plugin. Does some formatting too.
|
|
||||||
:param params: A formatted dictionary that was returned by self.common_datagram_params()
|
|
||||||
before being called here.
|
|
||||||
:param method: The preferred API Request method (GET, ADD, POST, etc....)
|
|
||||||
:type method: basestring
|
|
||||||
|
|
||||||
:return: Dictionary of status, if it logged in or not.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if self.sid is None and params[0]["url"] != "sys/login/user":
|
|
||||||
try:
|
|
||||||
self.connection._connect()
|
|
||||||
except Exception as err:
|
|
||||||
raise FMGBaseException(
|
|
||||||
msg="An problem happened with the httpapi plugin self-init connection process. "
|
|
||||||
"Error: " + to_text(err))
|
|
||||||
except IndexError:
|
|
||||||
raise FMGBaseException("An attempt was made at communicating with a FMG with "
|
|
||||||
"no valid session and an incorrectly formatted request.")
|
|
||||||
except Exception as err:
|
|
||||||
raise FMGBaseException("An attempt was made at communicating with a FMG with "
|
|
||||||
"no valid session and an unexpected error was discovered. \n Error: " + to_text(err))
|
|
||||||
|
|
||||||
self._update_request_id()
|
|
||||||
json_request = {
|
|
||||||
"method": method,
|
|
||||||
"params": params,
|
|
||||||
"session": self.sid,
|
|
||||||
"id": self.req_id,
|
|
||||||
"verbose": 1
|
|
||||||
}
|
|
||||||
data = json.dumps(json_request, ensure_ascii=False).replace('\\\\', '\\')
|
|
||||||
try:
|
|
||||||
# Sending URL and Data in Unicode, per Ansible Specifications for Connection Plugins
|
|
||||||
response, response_data = self.connection.send(path=to_text(self._url), data=to_text(data),
|
|
||||||
headers=BASE_HEADERS)
|
|
||||||
# Get Unicode Response - Must convert from StringIO to unicode first so we can do a replace function below
|
|
||||||
result = json.loads(to_text(response_data.getvalue()))
|
|
||||||
self._update_self_from_response(result, self._url, data)
|
|
||||||
return self._handle_response(result)
|
|
||||||
except Exception as err:
|
|
||||||
raise FMGBaseException(err)
|
|
||||||
|
|
||||||
def _handle_response(self, response):
|
|
||||||
self._set_sid(response)
|
|
||||||
if isinstance(response["result"], list):
|
|
||||||
result = response["result"][0]
|
|
||||||
else:
|
|
||||||
result = response["result"]
|
|
||||||
if "data" in result:
|
|
||||||
return result["status"]["code"], result["data"]
|
|
||||||
else:
|
|
||||||
return result["status"]["code"], result
|
|
||||||
|
|
||||||
def _update_self_from_response(self, response, url, data):
|
|
||||||
self._last_response_raw = response
|
|
||||||
if isinstance(response["result"], list):
|
|
||||||
result = response["result"][0]
|
|
||||||
else:
|
|
||||||
result = response["result"]
|
|
||||||
if "status" in result:
|
|
||||||
self._last_response_code = result["status"]["code"]
|
|
||||||
self._last_response_msg = result["status"]["message"]
|
|
||||||
self._last_url = url
|
|
||||||
self._last_data_payload = data
|
|
||||||
|
|
||||||
def _set_sid(self, response):
|
|
||||||
if self.sid is None and "session" in response:
|
|
||||||
self.sid = response["session"]
|
|
||||||
|
|
||||||
def return_connected_fmgr(self):
|
|
||||||
"""
|
|
||||||
Returns the data stored under self._connected_fmgr
|
|
||||||
|
|
||||||
:return: dict
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if self._connected_fmgr:
|
|
||||||
return self._connected_fmgr
|
|
||||||
except Exception:
|
|
||||||
raise FMGBaseException("Couldn't Retrieve Connected FMGR Stats")
|
|
||||||
|
|
||||||
def get_system_status(self):
|
|
||||||
"""
|
|
||||||
Returns the system status page from the FortiManager, for logging and other uses.
|
|
||||||
return: status
|
|
||||||
"""
|
|
||||||
status = self.send_request(FMGRMethods.GET, self._tools.format_request(FMGRMethods.GET, "sys/status"))
|
|
||||||
return status
|
|
||||||
|
|
||||||
@property
|
|
||||||
def debug(self):
|
|
||||||
return self._debug
|
|
||||||
|
|
||||||
@debug.setter
|
|
||||||
def debug(self, val):
|
|
||||||
self._debug = val
|
|
||||||
|
|
||||||
@property
|
|
||||||
def req_id(self):
|
|
||||||
return self._req_id
|
|
||||||
|
|
||||||
@req_id.setter
|
|
||||||
def req_id(self, val):
|
|
||||||
self._req_id = val
|
|
||||||
|
|
||||||
def _update_request_id(self, reqid=0):
|
|
||||||
self.req_id = reqid if reqid != 0 else self.req_id + 1
|
|
||||||
|
|
||||||
@property
|
|
||||||
def sid(self):
|
|
||||||
return self._sid
|
|
||||||
|
|
||||||
@sid.setter
|
|
||||||
def sid(self, val):
|
|
||||||
self._sid = val
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
if self.sid is not None and self.connection._url is not None:
|
|
||||||
return "FortiManager object connected to FortiManager: " + to_text(self.connection._url)
|
|
||||||
return "FortiManager object with no valid connection to a FortiManager appliance."
|
|
||||||
|
|
||||||
##################################
|
|
||||||
# BEGIN DATABASE LOCK CONTEXT CODE
|
|
||||||
##################################
|
|
||||||
|
|
||||||
@property
|
|
||||||
def uses_workspace(self):
|
|
||||||
return self._uses_workspace
|
|
||||||
|
|
||||||
@uses_workspace.setter
|
|
||||||
def uses_workspace(self, val):
|
|
||||||
self._uses_workspace = val
|
|
||||||
|
|
||||||
@property
|
|
||||||
def uses_adoms(self):
|
|
||||||
return self._uses_adoms
|
|
||||||
|
|
||||||
@uses_adoms.setter
|
|
||||||
def uses_adoms(self, val):
|
|
||||||
self._uses_adoms = val
|
|
||||||
|
|
||||||
def add_adom_to_lock_list(self, adom):
|
|
||||||
if adom not in self._locked_adom_list:
|
|
||||||
self._locked_adom_list.append(adom)
|
|
||||||
|
|
||||||
def remove_adom_from_lock_list(self, adom):
|
|
||||||
if adom in self._locked_adom_list:
|
|
||||||
self._locked_adom_list.remove(adom)
|
|
||||||
|
|
||||||
def check_mode(self):
|
|
||||||
"""
|
|
||||||
Checks FortiManager for the use of Workspace mode
|
|
||||||
"""
|
|
||||||
url = "/cli/global/system/global"
|
|
||||||
code, resp_obj = self.send_request(FMGRMethods.GET,
|
|
||||||
self._tools.format_request(FMGRMethods.GET,
|
|
||||||
url,
|
|
||||||
fields=["workspace-mode", "adom-status"]))
|
|
||||||
try:
|
|
||||||
if resp_obj["workspace-mode"] == "workflow":
|
|
||||||
self.uses_workspace = True
|
|
||||||
elif resp_obj["workspace-mode"] == "disabled":
|
|
||||||
self.uses_workspace = False
|
|
||||||
except KeyError:
|
|
||||||
raise FMGBaseException(msg="Couldn't determine workspace-mode in the plugin")
|
|
||||||
try:
|
|
||||||
if resp_obj["adom-status"] in [1, "enable"]:
|
|
||||||
self.uses_adoms = True
|
|
||||||
else:
|
|
||||||
self.uses_adoms = False
|
|
||||||
except KeyError:
|
|
||||||
raise FMGBaseException(msg="Couldn't determine adom-status in the plugin")
|
|
||||||
|
|
||||||
def run_unlock(self):
|
|
||||||
"""
|
|
||||||
Checks ADOM lock status; any ADOM locked by this user will be unlocked
|
|
||||||
"""
|
|
||||||
for adom_locked in self._locked_adoms_by_user:
|
|
||||||
adom = adom_locked["adom"]
|
|
||||||
self.unlock_adom(adom)
|
|
||||||
|
|
||||||
def lock_adom(self, adom=None, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
Locks an ADOM for changes
|
|
||||||
"""
|
|
||||||
if adom:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/lock/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom)
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/root/workspace/lock"
|
|
||||||
code, respobj = self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, url))
|
|
||||||
if code == 0 and respobj["status"]["message"].lower() == "ok":
|
|
||||||
self.add_adom_to_lock_list(adom)
|
|
||||||
return code, respobj
|
|
||||||
|
|
||||||
def unlock_adom(self, adom=None, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
Unlocks an ADOM after changes
|
|
||||||
"""
|
|
||||||
if adom:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/unlock/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom)
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/root/workspace/unlock"
|
|
||||||
code, respobj = self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, url))
|
|
||||||
if code == 0 and respobj["status"]["message"].lower() == "ok":
|
|
||||||
self.remove_adom_from_lock_list(adom)
|
|
||||||
return code, respobj
|
|
||||||
|
|
||||||
def commit_changes(self, adom=None, aux=False, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
Commits changes to an ADOM
|
|
||||||
"""
|
|
||||||
if adom:
|
|
||||||
if aux:
|
|
||||||
url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom)
|
|
||||||
else:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/commit/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom)
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/root/workspace/commit"
|
|
||||||
return self.send_request(FMGRMethods.EXEC, self._tools.format_request(FMGRMethods.EXEC, url))
|
|
||||||
|
|
||||||
def get_lock_info(self, adom=None):
|
|
||||||
"""
|
|
||||||
Gets ADOM lock info so it can be displayed with the error messages, or, if determined to be locked by Ansible
|
|
||||||
for some reason, so it can be unlocked.
|
|
||||||
"""
|
|
||||||
if not adom or adom == "root":
|
|
||||||
url = "/dvmdb/adom/root/workspace/lockinfo"
|
|
||||||
else:
|
|
||||||
if adom.lower() == "global":
|
|
||||||
url = "/dvmdb/global/workspace/lockinfo/"
|
|
||||||
else:
|
|
||||||
url = "/dvmdb/adom/{adom}/workspace/lockinfo/".format(adom=adom)
|
|
||||||
datagram = {}
|
|
||||||
data = self._tools.format_request(FMGRMethods.GET, url, **datagram)
|
|
||||||
resp_obj = self.send_request(FMGRMethods.GET, data)
|
|
||||||
code = resp_obj[0]
|
|
||||||
if code != 0:
|
|
||||||
self._module.fail_json(msg=("An error occurred trying to get the ADOM Lock Info. "
|
|
||||||
"Error: " + to_text(resp_obj)))
|
|
||||||
elif code == 0:
|
|
||||||
try:
|
|
||||||
if resp_obj[1]["status"]["message"] == "OK":
|
|
||||||
self._lock_info = None
|
|
||||||
except Exception:
|
|
||||||
self._lock_info = resp_obj[1]
|
|
||||||
return resp_obj
|
|
||||||
|
|
||||||
def get_adom_list(self):
|
|
||||||
"""
|
|
||||||
Gets the list of ADOMs for the FortiManager
|
|
||||||
"""
|
|
||||||
if self.uses_adoms:
|
|
||||||
url = "/dvmdb/adom"
|
|
||||||
datagram = {}
|
|
||||||
data = self._tools.format_request(FMGRMethods.GET, url, **datagram)
|
|
||||||
resp_obj = self.send_request(FMGRMethods.GET, data)
|
|
||||||
code = resp_obj[0]
|
|
||||||
if code != 0:
|
|
||||||
self._module.fail_json(msg=("An error occurred trying to get the ADOM Info. "
|
|
||||||
"Error: " + to_text(resp_obj)))
|
|
||||||
elif code == 0:
|
|
||||||
num_of_adoms = len(resp_obj[1])
|
|
||||||
append_list = ['root', ]
|
|
||||||
for adom in resp_obj[1]:
|
|
||||||
if adom["tab_status"] != "":
|
|
||||||
append_list.append(to_text(adom["name"]))
|
|
||||||
self._adom_list = append_list
|
|
||||||
return resp_obj
|
|
||||||
|
|
||||||
def get_locked_adom_list(self):
|
|
||||||
"""
|
|
||||||
Gets the list of locked ADOMs
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
locked_list = list()
|
|
||||||
locked_by_user_list = list()
|
|
||||||
for adom in self._adom_list:
|
|
||||||
adom_lock_info = self.get_lock_info(adom=adom)
|
|
||||||
try:
|
|
||||||
if adom_lock_info[1]["status"]["message"] == "OK":
|
|
||||||
continue
|
|
||||||
except IndexError as err:
|
|
||||||
pass
|
|
||||||
try:
|
|
||||||
if adom_lock_info[1][0]["lock_user"]:
|
|
||||||
locked_list.append(to_text(adom))
|
|
||||||
if adom_lock_info[1][0]["lock_user"] == self._logged_in_user:
|
|
||||||
locked_by_user_list.append({"adom": to_text(adom),
|
|
||||||
"user": to_text(adom_lock_info[1][0]["lock_user"])})
|
|
||||||
except Exception as err:
|
|
||||||
raise FMGBaseException(err)
|
|
||||||
self._locked_adom_list = locked_list
|
|
||||||
self._locked_adoms_by_user = locked_by_user_list
|
|
||||||
|
|
||||||
except Exception as err:
|
|
||||||
raise FMGBaseException(msg=("An error occurred while trying to get the locked adom list. Error: "
|
|
||||||
+ to_text(err)))
|
|
||||||
|
|
||||||
################################
|
|
||||||
# END DATABASE LOCK CONTEXT CODE
|
|
||||||
################################
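# Illustrative sketch only, not part of the original handler: how a module would
# typically drive the workspace-lock methods defined above. The helper name and
# the "root" default are assumptions; the method names are the ones implemented
# in this class.
def _example_locked_change(fmgr, adom="root"):
    fmgr.check_mode()                          # detect workspace/ADOM usage
    if fmgr.uses_workspace:
        fmgr.lock_adom(adom=adom)              # lock before making changes
    try:
        code, data = fmgr.get_system_status()  # ...perform real changes here...
    finally:
        if fmgr.uses_workspace:
            fmgr.commit_changes(adom=adom)     # persist the changes
            fmgr.unlock_adom(adom=adom)        # release the lock
    return code, data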
|
|
|
@ -1,386 +0,0 @@
|
||||||
# Copyright (c) 2018 Cisco and/or its affiliates.
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
---
|
|
||||||
author: Ansible Networking Team
|
|
||||||
httpapi: ftd
|
|
||||||
short_description: HttpApi Plugin for Cisco ASA Firepower device
|
|
||||||
description:
|
|
||||||
- This HttpApi plugin provides methods to connect to Cisco ASA firepower
|
|
||||||
devices over an HTTP(S)-based API.
|
|
||||||
options:
|
|
||||||
token_path:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Specifies the api token path of the FTD device
|
|
||||||
vars:
|
|
||||||
- name: ansible_httpapi_ftd_token_path
|
|
||||||
spec_path:
|
|
||||||
type: str
|
|
||||||
description:
|
|
||||||
- Specifies the api spec path of the FTD device
|
|
||||||
default: '/apispec/ngfw.json'
|
|
||||||
vars:
|
|
||||||
- name: ansible_httpapi_ftd_spec_path
|
|
||||||
'''
|
|
||||||
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
|
|
||||||
from ansible import __version__ as ansible_version
|
|
||||||
|
|
||||||
from ansible.module_utils.basic import to_text
|
|
||||||
from ansible.errors import AnsibleConnectionFailure
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.ftd.fdm_swagger_client import FdmSwaggerParser, SpecProp, FdmSwaggerValidator
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.ftd.common import HTTPMethod, ResponseParams
|
|
||||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
|
||||||
from ansible.module_utils.six.moves.urllib.parse import urlencode
|
|
||||||
from ansible.plugins.httpapi import HttpApiBase
|
|
||||||
from urllib3 import encode_multipart_formdata
|
|
||||||
from urllib3.fields import RequestField
|
|
||||||
from ansible.module_utils.connection import ConnectionError
|
|
||||||
|
|
||||||
BASE_HEADERS = {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
'Accept': 'application/json',
|
|
||||||
'User-Agent': 'FTD Ansible/%s' % ansible_version
|
|
||||||
}
|
|
||||||
|
|
||||||
TOKEN_EXPIRATION_STATUS_CODE = 408
|
|
||||||
UNAUTHORIZED_STATUS_CODE = 401
|
|
||||||
API_TOKEN_PATH_OPTION_NAME = 'token_path'
|
|
||||||
TOKEN_PATH_TEMPLATE = '/api/fdm/{0}/fdm/token'
|
|
||||||
GET_API_VERSIONS_PATH = '/api/versions'
|
|
||||||
DEFAULT_API_VERSIONS = ['v2', 'v1']
|
|
||||||
|
|
||||||
INVALID_API_TOKEN_PATH_MSG = ('The API token path is incorrect. Please, check correctness of '
|
|
||||||
'the `ansible_httpapi_ftd_token_path` variable in the inventory file.')
|
|
||||||
MISSING_API_TOKEN_PATH_MSG = ('Ansible could not determine the API token path automatically. Please, '
|
|
||||||
'specify the `ansible_httpapi_ftd_token_path` variable in the inventory file.')
|
|
||||||
|
|
||||||
|
|
||||||
class HttpApi(HttpApiBase):
|
|
||||||
def __init__(self, connection):
|
|
||||||
super(HttpApi, self).__init__(connection)
|
|
||||||
self.connection = connection
|
|
||||||
self.access_token = None
|
|
||||||
self.refresh_token = None
|
|
||||||
self._api_spec = None
|
|
||||||
self._api_validator = None
|
|
||||||
self._ignore_http_errors = False
|
|
||||||
|
|
||||||
def login(self, username, password):
|
|
||||||
def request_token_payload(username, password):
|
|
||||||
return {
|
|
||||||
'grant_type': 'password',
|
|
||||||
'username': username,
|
|
||||||
'password': password
|
|
||||||
}
|
|
||||||
|
|
||||||
def refresh_token_payload(refresh_token):
|
|
||||||
return {
|
|
||||||
'grant_type': 'refresh_token',
|
|
||||||
'refresh_token': refresh_token
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.refresh_token:
|
|
||||||
payload = refresh_token_payload(self.refresh_token)
|
|
||||||
elif username and password:
|
|
||||||
payload = request_token_payload(username, password)
|
|
||||||
else:
|
|
||||||
raise AnsibleConnectionFailure('Username and password are required for login in absence of refresh token')
|
|
||||||
|
|
||||||
response = self._lookup_login_url(payload)
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.refresh_token = response['refresh_token']
|
|
||||||
self.access_token = response['access_token']
|
|
||||||
self.connection._auth = {'Authorization': 'Bearer %s' % self.access_token}
|
|
||||||
except KeyError:
|
|
||||||
raise ConnectionError(
|
|
||||||
'Server returned response without token info during connection authentication: %s' % response)
|
|
||||||
|
|
||||||
def _lookup_login_url(self, payload):
|
|
||||||
""" Try to find correct login URL and get api token using this URL.
|
|
||||||
|
|
||||||
:param payload: Token request payload
|
|
||||||
:type payload: dict
|
|
||||||
:return: token generation response
|
|
||||||
"""
|
|
||||||
preconfigured_token_path = self._get_api_token_path()
|
|
||||||
if preconfigured_token_path:
|
|
||||||
token_paths = [preconfigured_token_path]
|
|
||||||
else:
|
|
||||||
token_paths = self._get_known_token_paths()
|
|
||||||
|
|
||||||
for url in token_paths:
|
|
||||||
try:
|
|
||||||
response = self._send_login_request(payload, url)
|
|
||||||
|
|
||||||
except ConnectionError as e:
|
|
||||||
self.connection.queue_message('vvvv', 'REST:request to %s failed because of connection error: %s ' % (
|
|
||||||
url, e))
|
|
||||||
# In the case of ConnectionError caused by HTTPError we should check response code.
|
|
||||||
# Response code 400 is returned for invalid credentials, so we should stop attempting to log in and
|
|
||||||
# inform the user.
|
|
||||||
if hasattr(e, 'http_code') and e.http_code == 400:
|
|
||||||
raise
|
|
||||||
else:
|
|
||||||
if not preconfigured_token_path:
|
|
||||||
self._set_api_token_path(url)
|
|
||||||
return response
|
|
||||||
|
|
||||||
raise ConnectionError(INVALID_API_TOKEN_PATH_MSG if preconfigured_token_path else MISSING_API_TOKEN_PATH_MSG)
|
|
||||||
|
|
||||||
def _send_login_request(self, payload, url):
|
|
||||||
self._display(HTTPMethod.POST, 'login', url)
|
|
||||||
response, response_data = self._send_auth_request(
|
|
||||||
url, json.dumps(payload), method=HTTPMethod.POST, headers=BASE_HEADERS
|
|
||||||
)
|
|
||||||
self._display(HTTPMethod.POST, 'login:status_code', response.getcode())
|
|
||||||
|
|
||||||
response = self._response_to_json(self._get_response_value(response_data))
|
|
||||||
return response
|
|
||||||
|
|
||||||
def logout(self):
|
|
||||||
auth_payload = {
|
|
||||||
'grant_type': 'revoke_token',
|
|
||||||
'access_token': self.access_token,
|
|
||||||
'token_to_revoke': self.refresh_token
|
|
||||||
}
|
|
||||||
|
|
||||||
url = self._get_api_token_path()
|
|
||||||
|
|
||||||
self._display(HTTPMethod.POST, 'logout', url)
|
|
||||||
response, dummy = self._send_auth_request(url, json.dumps(auth_payload), method=HTTPMethod.POST,
|
|
||||||
headers=BASE_HEADERS)
|
|
||||||
self._display(HTTPMethod.POST, 'logout:status_code', response.getcode())
|
|
||||||
|
|
||||||
self.refresh_token = None
|
|
||||||
self.access_token = None
|
|
||||||
|
|
||||||
def _send_auth_request(self, path, data, **kwargs):
|
|
||||||
error_msg_prefix = 'Server returned an error during authentication request'
|
|
||||||
return self._send_service_request(path, error_msg_prefix, data=data, **kwargs)
|
|
||||||
|
|
||||||
def _send_service_request(self, path, error_msg_prefix, data=None, **kwargs):
|
|
||||||
try:
|
|
||||||
self._ignore_http_errors = True
|
|
||||||
return self.connection.send(path, data, **kwargs)
|
|
||||||
except HTTPError as e:
|
|
||||||
# HttpApi connection does not read the error response from HTTPError, so we do it here and wrap it up in
|
|
||||||
# ConnectionError, so the actual error message is displayed to the user.
|
|
||||||
error_msg = self._response_to_json(to_text(e.read()))
|
|
||||||
raise ConnectionError('%s: %s' % (error_msg_prefix, error_msg), http_code=e.code)
|
|
||||||
finally:
|
|
||||||
self._ignore_http_errors = False
|
|
||||||
|
|
||||||
def update_auth(self, response, response_data):
|
|
||||||
# With tokens, authentication should not be checked and updated on each request
|
|
||||||
return None
|
|
||||||
|
|
||||||
def send_request(self, url_path, http_method, body_params=None, path_params=None, query_params=None):
|
|
||||||
url = construct_url_path(url_path, path_params, query_params)
|
|
||||||
data = json.dumps(body_params) if body_params else None
|
|
||||||
try:
|
|
||||||
self._display(http_method, 'url', url)
|
|
||||||
if data:
|
|
||||||
self._display(http_method, 'data', data)
|
|
||||||
|
|
||||||
response, response_data = self.connection.send(url, data, method=http_method, headers=BASE_HEADERS)
|
|
||||||
|
|
||||||
value = self._get_response_value(response_data)
|
|
||||||
self._display(http_method, 'response', value)
|
|
||||||
|
|
||||||
return {
|
|
||||||
ResponseParams.SUCCESS: True,
|
|
||||||
ResponseParams.STATUS_CODE: response.getcode(),
|
|
||||||
ResponseParams.RESPONSE: self._response_to_json(value)
|
|
||||||
}
|
|
||||||
# Being invoked via JSON-RPC, this method does not serialize and pass HTTPError correctly to the method caller.
|
|
||||||
# Thus, in order to handle non-200 responses, we need to wrap them into a simple structure and pass explicitly.
|
|
||||||
except HTTPError as e:
|
|
||||||
error_msg = to_text(e.read())
|
|
||||||
self._display(http_method, 'error', error_msg)
|
|
||||||
return {
|
|
||||||
ResponseParams.SUCCESS: False,
|
|
||||||
ResponseParams.STATUS_CODE: e.code,
|
|
||||||
ResponseParams.RESPONSE: self._response_to_json(error_msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
def upload_file(self, from_path, to_url):
|
|
||||||
url = construct_url_path(to_url)
|
|
||||||
self._display(HTTPMethod.POST, 'upload', url)
|
|
||||||
with open(from_path, 'rb') as src_file:
|
|
||||||
rf = RequestField('fileToUpload', src_file.read(), os.path.basename(src_file.name))
|
|
||||||
rf.make_multipart()
|
|
||||||
body, content_type = encode_multipart_formdata([rf])
|
|
||||||
|
|
||||||
headers = dict(BASE_HEADERS)
|
|
||||||
headers['Content-Type'] = content_type
|
|
||||||
headers['Content-Length'] = len(body)
|
|
||||||
|
|
||||||
dummy, response_data = self.connection.send(url, data=body, method=HTTPMethod.POST, headers=headers)
|
|
||||||
value = self._get_response_value(response_data)
|
|
||||||
self._display(HTTPMethod.POST, 'upload:response', value)
|
|
||||||
return self._response_to_json(value)
|
|
||||||
|
|
||||||
def download_file(self, from_url, to_path, path_params=None):
|
|
||||||
url = construct_url_path(from_url, path_params=path_params)
|
|
||||||
self._display(HTTPMethod.GET, 'download', url)
|
|
||||||
response, response_data = self.connection.send(url, data=None, method=HTTPMethod.GET, headers=BASE_HEADERS)
|
|
||||||
|
|
||||||
if os.path.isdir(to_path):
|
|
||||||
filename = extract_filename_from_headers(response.info())
|
|
||||||
to_path = os.path.join(to_path, filename)
|
|
||||||
|
|
||||||
with open(to_path, "wb") as output_file:
|
|
||||||
output_file.write(response_data.getvalue())
|
|
||||||
self._display(HTTPMethod.GET, 'downloaded', to_path)
|
|
||||||
|
|
||||||
def handle_httperror(self, exc):
|
|
||||||
is_auth_related_code = exc.code == TOKEN_EXPIRATION_STATUS_CODE or exc.code == UNAUTHORIZED_STATUS_CODE
|
|
||||||
if not self._ignore_http_errors and is_auth_related_code:
|
|
||||||
self.connection._auth = None
|
|
||||||
self.login(self.connection.get_option('remote_user'), self.connection.get_option('password'))
|
|
||||||
return True
|
|
||||||
# False means that the exception will be passed further to the caller
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _display(self, http_method, title, msg=''):
|
|
||||||
self.connection.queue_message('vvvv', 'REST:%s:%s:%s\n%s' % (http_method, self.connection._url, title, msg))
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _get_response_value(response_data):
|
|
||||||
return to_text(response_data.getvalue())
|
|
||||||
|
|
||||||
def _get_api_spec_path(self):
|
|
||||||
return self.get_option('spec_path')
|
|
||||||
|
|
||||||
def _get_known_token_paths(self):
|
|
||||||
"""Generate list of token generation urls based on list of versions supported by device(if exposed via API) or
|
|
||||||
default list of API versions.
|
|
||||||
|
|
||||||
:returns: list of token generation urls
|
|
||||||
:rtype: generator
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
api_versions = self._get_supported_api_versions()
|
|
||||||
except ConnectionError:
|
|
||||||
# The API versions endpoint is not supported, so we need to check all known versions
|
|
||||||
api_versions = DEFAULT_API_VERSIONS
|
|
||||||
|
|
||||||
return [TOKEN_PATH_TEMPLATE.format(version) for version in api_versions]
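# Illustrative sketch, not part of the original plugin: the candidate token URLs
# produced from the constants above when the versions endpoint is unavailable
# and the DEFAULT_API_VERSIONS fallback is used.
_EXAMPLE_DEFAULT_TOKEN_PATHS = [TOKEN_PATH_TEMPLATE.format(v) for v in DEFAULT_API_VERSIONS]
# -> ['/api/fdm/v2/fdm/token', '/api/fdm/v1/fdm/token']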
|
|
||||||
|
|
||||||
def _get_supported_api_versions(self):
|
|
||||||
"""
|
|
||||||
Fetch list of API versions supported by device.
|
|
||||||
|
|
||||||
:return: list of API versions suitable for device
|
|
||||||
:rtype: list
|
|
||||||
"""
|
|
||||||
# Try to fetch the supported API versions
|
|
||||||
http_method = HTTPMethod.GET
|
|
||||||
response, response_data = self._send_service_request(
|
|
||||||
path=GET_API_VERSIONS_PATH,
|
|
||||||
error_msg_prefix="Can't fetch list of supported api versions",
|
|
||||||
method=http_method,
|
|
||||||
headers=BASE_HEADERS
|
|
||||||
)
|
|
||||||
|
|
||||||
value = self._get_response_value(response_data)
|
|
||||||
self._display(http_method, 'response', value)
|
|
||||||
api_versions_info = self._response_to_json(value)
|
|
||||||
return api_versions_info["supportedVersions"]
|
|
||||||
|
|
||||||
def _get_api_token_path(self):
|
|
||||||
return self.get_option(API_TOKEN_PATH_OPTION_NAME)
|
|
||||||
|
|
||||||
def _set_api_token_path(self, url):
|
|
||||||
return self.set_option(API_TOKEN_PATH_OPTION_NAME, url)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _response_to_json(response_text):
|
|
||||||
try:
|
|
||||||
return json.loads(response_text) if response_text else {}
|
|
||||||
# JSONDecodeError only available on Python 3.5+
|
|
||||||
except getattr(json.decoder, 'JSONDecodeError', ValueError):
|
|
||||||
raise ConnectionError('Invalid JSON response: %s' % response_text)
|
|
||||||
|
|
||||||
def get_operation_spec(self, operation_name):
|
|
||||||
return self.api_spec[SpecProp.OPERATIONS].get(operation_name, None)
|
|
||||||
|
|
||||||
def get_operation_specs_by_model_name(self, model_name):
|
|
||||||
if model_name:
|
|
||||||
return self.api_spec[SpecProp.MODEL_OPERATIONS].get(model_name, None)
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_model_spec(self, model_name):
|
|
||||||
return self.api_spec[SpecProp.MODELS].get(model_name, None)
|
|
||||||
|
|
||||||
def validate_data(self, operation_name, data):
|
|
||||||
return self.api_validator.validate_data(operation_name, data)
|
|
||||||
|
|
||||||
def validate_query_params(self, operation_name, params):
|
|
||||||
return self.api_validator.validate_query_params(operation_name, params)
|
|
||||||
|
|
||||||
def validate_path_params(self, operation_name, params):
|
|
||||||
return self.api_validator.validate_path_params(operation_name, params)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def api_spec(self):
|
|
||||||
if self._api_spec is None:
|
|
||||||
spec_path_url = self._get_api_spec_path()
|
|
||||||
response = self.send_request(url_path=spec_path_url, http_method=HTTPMethod.GET)
|
|
||||||
if response[ResponseParams.SUCCESS]:
|
|
||||||
self._api_spec = FdmSwaggerParser().parse_spec(response[ResponseParams.RESPONSE])
|
|
||||||
else:
|
|
||||||
raise ConnectionError('Failed to download API specification. Status code: %s. Response: %s' % (
|
|
||||||
response[ResponseParams.STATUS_CODE], response[ResponseParams.RESPONSE]))
|
|
||||||
return self._api_spec
|
|
||||||
|
|
||||||
@property
|
|
||||||
def api_validator(self):
|
|
||||||
if self._api_validator is None:
|
|
||||||
self._api_validator = FdmSwaggerValidator(self.api_spec)
|
|
||||||
return self._api_validator
|
|
||||||
|
|
||||||
|
|
||||||
def construct_url_path(path, path_params=None, query_params=None):
|
|
||||||
url = path
|
|
||||||
if path_params:
|
|
||||||
url = url.format(**path_params)
|
|
||||||
if query_params:
|
|
||||||
url += "?" + urlencode(query_params)
|
|
||||||
return url
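# Illustrative sketch, not part of the original plugin: the endpoint and
# parameter names are hypothetical and only show how construct_url_path()
# substitutes path_params and appends query_params.
_EXAMPLE_URL = construct_url_path(
    '/object/networks/{objId}',
    path_params={'objId': '123'},
    query_params={'limit': 10},
)
# -> '/object/networks/123?limit=10'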
|
|
||||||
|
|
||||||
|
|
||||||
def extract_filename_from_headers(response_info):
|
|
||||||
content_header_regex = r'attachment; ?filename="?([^"]+)'
|
|
||||||
match = re.match(content_header_regex, response_info.get('Content-Disposition'))
|
|
||||||
if match:
|
|
||||||
return match.group(1)
|
|
||||||
else:
|
|
||||||
raise ValueError("No appropriate Content-Disposition header is specified.")
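# Illustrative sketch, not part of the original plugin: a plain dict stands in
# for the HTTPMessage returned by response.info(); the filename is hypothetical.
_EXAMPLE_FILENAME = extract_filename_from_headers(
    {'Content-Disposition': 'attachment; filename="backup.zip"'}
)
# -> 'backup.zip'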
|
|
|
@ -1,127 +0,0 @@
|
||||||
# python 3 headers, required if submitting to Ansible
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
lookup: avi
|
|
||||||
author: Sandeep Bandi <sandeepb@avinetworks.com>
|
|
||||||
short_description: Look up ``Avi`` objects.
|
|
||||||
description:
|
|
||||||
- Given an object_type, fetch all the objects of that type or fetch
|
|
||||||
the specific object that matches the name/uuid given via options.
|
|
||||||
- For a single object lookup, if you want the output to be a list, you may
|
|
||||||
want to pass option wantlist=True to the plugin.
|
|
||||||
|
|
||||||
options:
|
|
||||||
obj_type:
|
|
||||||
description:
|
|
||||||
- type of object to query
|
|
||||||
required: True
|
|
||||||
obj_name:
|
|
||||||
description:
|
|
||||||
- name of the object to query
|
|
||||||
obj_uuid:
|
|
||||||
description:
|
|
||||||
- UUID of the object to query
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- community.general.avi
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = """
|
|
||||||
# Lookup query for all the objects of a specific type.
|
|
||||||
- debug: msg="{{ lookup('avi', avi_credentials=avi_credentials, obj_type='virtualservice') }}"
|
|
||||||
# Lookup query for an object with the given name and type.
|
|
||||||
- debug: msg="{{ lookup('avi', avi_credentials=avi_credentials, obj_name='vs1', obj_type='virtualservice', wantlist=True) }}"
|
|
||||||
# Lookup query for an object with the given UUID and type.
|
|
||||||
- debug: msg="{{ lookup('avi', obj_uuid='virtualservice-5c0e183a-690a-45d8-8d6f-88c30a52550d', obj_type='virtualservice') }}"
|
|
||||||
# We can replace lookup with the query function to always get the output as a list.
|
|
||||||
# This is helpful for looping.
|
|
||||||
- debug: msg="{{ query('avi', obj_uuid='virtualservice-5c0e183a-690a-45d8-8d6f-88c30a52550d', obj_type='virtualservice') }}"
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = """
|
|
||||||
_raw:
|
|
||||||
description:
|
|
||||||
- One or more objects returned from the ``Avi`` API.
|
|
||||||
type: list
|
|
||||||
elements: dictionary
|
|
||||||
"""
|
|
||||||
|
|
||||||
from ansible.module_utils._text import to_native
|
|
||||||
from ansible.errors import AnsibleError, AnsibleParserError
|
|
||||||
from ansible.plugins.lookup import LookupBase
|
|
||||||
from ansible.utils.display import Display
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import (ApiSession,
|
|
||||||
AviCredentials,
|
|
||||||
AviServerError,
|
|
||||||
ObjectNotFound,
|
|
||||||
APIError)
|
|
||||||
|
|
||||||
display = Display()
|
|
||||||
|
|
||||||
|
|
||||||
def _api(avi_session, path, **kwargs):
|
|
||||||
'''
|
|
||||||
Generic function to handle both /<obj_type>/<obj_uuid> and /<obj_type>
|
|
||||||
API resource endpoints.
|
|
||||||
'''
|
|
||||||
rsp = []
|
|
||||||
try:
|
|
||||||
rsp_data = avi_session.get(path, **kwargs).json()
|
|
||||||
if 'results' in rsp_data:
|
|
||||||
rsp = rsp_data['results']
|
|
||||||
else:
|
|
||||||
rsp.append(rsp_data)
|
|
||||||
except ObjectNotFound as e:
|
|
||||||
display.warning('Resource not found. Please check obj_name/'
|
|
||||||
'obj_uuid/obj_type are spelled correctly.')
|
|
||||||
display.v(to_native(e))
|
|
||||||
except (AviServerError, APIError) as e:
|
|
||||||
raise AnsibleError(to_native(e))
|
|
||||||
except Exception as e:
|
|
||||||
# Generic exception handling for connection failures
|
|
||||||
raise AnsibleError('Unable to communicate with controller '
|
|
||||||
'due to error: %s' % to_native(e))
|
|
||||||
|
|
||||||
return rsp
|
|
||||||
|
|
||||||
|
|
||||||
class LookupModule(LookupBase):
|
|
||||||
def run(self, terms, variables=None, avi_credentials=None, **kwargs):
|
|
||||||
|
|
||||||
api_creds = AviCredentials(**avi_credentials)
|
|
||||||
# Create the session using avi_credentials
|
|
||||||
try:
|
|
||||||
avi = ApiSession(avi_credentials=api_creds)
|
|
||||||
except Exception as e:
|
|
||||||
raise AnsibleError(to_native(e))
|
|
||||||
|
|
||||||
# Return an empty list if the object is not found
|
|
||||||
rsp = []
|
|
||||||
try:
|
|
||||||
path = kwargs.pop('obj_type')
|
|
||||||
except KeyError:
|
|
||||||
raise AnsibleError("Please pass the obj_type for lookup")
|
|
||||||
|
|
||||||
if kwargs.get('obj_name', None):
|
|
||||||
name = kwargs.pop('obj_name')
|
|
||||||
try:
|
|
||||||
display.v("Fetching obj: %s of type: %s" % (name, path))
|
|
||||||
rsp_data = avi.get_object_by_name(path, name, **kwargs)
|
|
||||||
if rsp_data:
|
|
||||||
# Append the return data only if it is not None, i.e. an object
|
|
||||||
# with the specified name is present
|
|
||||||
rsp.append(rsp_data)
|
|
||||||
except AviServerError as e:
|
|
||||||
raise AnsibleError(to_native(e))
|
|
||||||
elif kwargs.get('obj_uuid', None):
|
|
||||||
obj_uuid = kwargs.pop('obj_uuid')
|
|
||||||
obj_path = "%s/%s" % (path, obj_uuid)
|
|
||||||
display.v("Fetching obj: %s of type: %s" % (obj_uuid, path))
|
|
||||||
rsp = _api(avi, obj_path, **kwargs)
|
|
||||||
else:
|
|
||||||
display.v("Fetching all objects of type: %s" % path)
|
|
||||||
rsp = _api(avi, path, **kwargs)
|
|
||||||
|
|
||||||
return rsp
|
|
|
@ -1,383 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright 2016 F5 Networks Inc.
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
|
|
||||||
# Legacy
|
|
||||||
|
|
||||||
try:
|
|
||||||
import bigsuds
|
|
||||||
bigsuds_found = True
|
|
||||||
except ImportError:
|
|
||||||
bigsuds_found = False
|
|
||||||
|
|
||||||
|
|
||||||
from ansible.module_utils.basic import env_fallback
|
|
||||||
|
|
||||||
|
|
||||||
def f5_argument_spec():
|
|
||||||
return dict(
|
|
||||||
server=dict(
|
|
||||||
type='str',
|
|
||||||
required=True,
|
|
||||||
fallback=(env_fallback, ['F5_SERVER'])
|
|
||||||
),
|
|
||||||
user=dict(
|
|
||||||
type='str',
|
|
||||||
required=True,
|
|
||||||
fallback=(env_fallback, ['F5_USER'])
|
|
||||||
),
|
|
||||||
password=dict(
|
|
||||||
type='str',
|
|
||||||
aliases=['pass', 'pwd'],
|
|
||||||
required=True,
|
|
||||||
no_log=True,
|
|
||||||
fallback=(env_fallback, ['F5_PASSWORD'])
|
|
||||||
),
|
|
||||||
validate_certs=dict(
|
|
||||||
default='yes',
|
|
||||||
type='bool',
|
|
||||||
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
|
|
||||||
),
|
|
||||||
server_port=dict(
|
|
||||||
type='int',
|
|
||||||
default=443,
|
|
||||||
fallback=(env_fallback, ['F5_SERVER_PORT'])
|
|
||||||
),
|
|
||||||
state=dict(
|
|
||||||
type='str',
|
|
||||||
default='present',
|
|
||||||
choices=['present', 'absent']
|
|
||||||
),
|
|
||||||
partition=dict(
|
|
||||||
type='str',
|
|
||||||
default='Common',
|
|
||||||
fallback=(env_fallback, ['F5_PARTITION'])
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def f5_parse_arguments(module):
|
|
||||||
if not bigsuds_found:
|
|
||||||
module.fail_json(msg="the python bigsuds module is required")
|
|
||||||
|
|
||||||
if module.params['validate_certs']:
|
|
||||||
import ssl
|
|
||||||
if not hasattr(ssl, 'SSLContext'):
|
|
||||||
module.fail_json(
|
|
||||||
msg="bigsuds does not support verifying certificates with python < 2.7.9."
|
|
||||||
"Either update python or set validate_certs=False on the task'")
|
|
||||||
|
|
||||||
return (
|
|
||||||
module.params['server'],
|
|
||||||
module.params['user'],
|
|
||||||
module.params['password'],
|
|
||||||
module.params['state'],
|
|
||||||
module.params['partition'],
|
|
||||||
module.params['validate_certs'],
|
|
||||||
module.params['server_port']
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def bigip_api(bigip, user, password, validate_certs, port=443):
|
|
||||||
try:
|
|
||||||
if bigsuds.__version__ >= '1.0.4':
|
|
||||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
|
|
||||||
elif bigsuds.__version__ == '1.0.3':
|
|
||||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
|
|
||||||
else:
|
|
||||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
|
||||||
except TypeError:
|
|
||||||
# bigsuds < 1.0.3, no verify param
|
|
||||||
if validate_certs:
|
|
||||||
# Note: verified we have SSLContext when we parsed params
|
|
||||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
|
||||||
else:
|
|
||||||
import ssl
|
|
||||||
if hasattr(ssl, 'SSLContext'):
|
|
||||||
# Really, you should never do this. It disables certificate
|
|
||||||
# verification *globally*. But since older bigip libraries
|
|
||||||
# don't give us a way to toggle verification we need to
|
|
||||||
# disable it at the global level.
|
|
||||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
|
||||||
ssl._create_default_https_context = ssl._create_unverified_context
|
|
||||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
|
||||||
|
|
||||||
return api
|
|
||||||
|
|
||||||
|
|
||||||
# Fully Qualified name (with the partition)
|
|
||||||
def fq_name(partition, name):
|
|
||||||
if name is not None and not name.startswith('/'):
|
|
||||||
return '/%s/%s' % (partition, name)
|
|
||||||
return name
|
|
||||||
|
|
||||||
|
|
||||||
# Fully Qualified name (with partition) for a list
|
|
||||||
def fq_list_names(partition, list_names):
|
|
||||||
if list_names is None:
|
|
||||||
return None
|
|
||||||
return map(lambda x: fq_name(partition, x), list_names)
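# Illustrative sketch, not part of the original code: the pool names are
# hypothetical and only show the fully-qualified naming behaviour above; a name
# that already starts with '/' is returned unchanged.
_EXAMPLE_FQ_NAMES = list(fq_list_names('Common', ['web-pool', '/Tenant1/app-pool']))
# -> ['/Common/web-pool', '/Tenant1/app-pool']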
|
|
||||||
|
|
||||||
|
|
||||||
def to_commands(module, commands):
|
|
||||||
spec = {
|
|
||||||
'command': dict(key=True),
|
|
||||||
'prompt': dict(),
|
|
||||||
'answer': dict()
|
|
||||||
}
|
|
||||||
transform = ComplexList(spec, module)
|
|
||||||
return transform(commands)
|
|
||||||
|
|
||||||
|
|
||||||
def run_commands(module, commands, check_rc=True):
|
|
||||||
responses = list()
|
|
||||||
commands = to_commands(module, to_list(commands))
|
|
||||||
for cmd in commands:
|
|
||||||
cmd = module.jsonify(cmd)
|
|
||||||
rc, out, err = exec_command(module, cmd)
|
|
||||||
if check_rc and rc != 0:
|
|
||||||
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
|
|
||||||
responses.append(to_text(out, errors='surrogate_then_replace'))
|
|
||||||
return responses
|
|
||||||
|
|
||||||
|
|
||||||
# New style
|
|
||||||
|
|
||||||
from abc import ABCMeta, abstractproperty
|
|
||||||
from collections import defaultdict
|
|
||||||
|
|
||||||
try:
|
|
||||||
from f5.bigip import ManagementRoot as BigIpMgmt
|
|
||||||
from f5.bigip.contexts import TransactionContextManager as BigIpTxContext
|
|
||||||
|
|
||||||
from f5.bigiq import ManagementRoot as BigIqMgmt
|
|
||||||
|
|
||||||
from f5.iworkflow import ManagementRoot as iWorkflowMgmt
|
|
||||||
from icontrol.exceptions import iControlUnexpectedHTTPError
|
|
||||||
HAS_F5SDK = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_F5SDK = False
|
|
||||||
|
|
||||||
|
|
||||||
from ansible.module_utils.basic import AnsibleModule
|
|
||||||
from ansible.module_utils.six import iteritems, with_metaclass
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
|
|
||||||
from ansible.module_utils.connection import exec_command
|
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
|
|
||||||
|
|
||||||
F5_COMMON_ARGS = dict(
|
|
||||||
server=dict(
|
|
||||||
type='str',
|
|
||||||
required=True,
|
|
||||||
fallback=(env_fallback, ['F5_SERVER'])
|
|
||||||
),
|
|
||||||
user=dict(
|
|
||||||
type='str',
|
|
||||||
required=True,
|
|
||||||
fallback=(env_fallback, ['F5_USER'])
|
|
||||||
),
|
|
||||||
password=dict(
|
|
||||||
type='str',
|
|
||||||
aliases=['pass', 'pwd'],
|
|
||||||
required=True,
|
|
||||||
no_log=True,
|
|
||||||
fallback=(env_fallback, ['F5_PASSWORD'])
|
|
||||||
),
|
|
||||||
validate_certs=dict(
|
|
||||||
default='yes',
|
|
||||||
type='bool',
|
|
||||||
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
|
|
||||||
),
|
|
||||||
server_port=dict(
|
|
||||||
type='int',
|
|
||||||
default=443,
|
|
||||||
fallback=(env_fallback, ['F5_SERVER_PORT'])
|
|
||||||
),
|
|
||||||
state=dict(
|
|
||||||
type='str',
|
|
||||||
default='present',
|
|
||||||
choices=['present', 'absent']
|
|
||||||
),
|
|
||||||
partition=dict(
|
|
||||||
type='str',
|
|
||||||
default='Common',
|
|
||||||
fallback=(env_fallback, ['F5_PARTITION'])
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class AnsibleF5Client(object):
|
|
||||||
def __init__(self, argument_spec=None, supports_check_mode=False,
|
|
||||||
mutually_exclusive=None, required_together=None,
|
|
||||||
required_if=None, required_one_of=None, add_file_common_args=False,
|
|
||||||
f5_product_name='bigip', sans_state=False, sans_partition=False):
|
|
||||||
|
|
||||||
self.f5_product_name = f5_product_name
|
|
||||||
|
|
||||||
merged_arg_spec = dict()
|
|
||||||
merged_arg_spec.update(F5_COMMON_ARGS)
|
|
||||||
if argument_spec:
|
|
||||||
merged_arg_spec.update(argument_spec)
|
|
||||||
if sans_state:
|
|
||||||
del merged_arg_spec['state']
|
|
||||||
if sans_partition:
|
|
||||||
del merged_arg_spec['partition']
|
|
||||||
self.arg_spec = merged_arg_spec
|
|
||||||
|
|
||||||
mutually_exclusive_params = []
|
|
||||||
if mutually_exclusive:
|
|
||||||
mutually_exclusive_params += mutually_exclusive
|
|
||||||
|
|
||||||
required_together_params = []
|
|
||||||
if required_together:
|
|
||||||
required_together_params += required_together
|
|
||||||
|
|
||||||
self.module = AnsibleModule(
|
|
||||||
argument_spec=merged_arg_spec,
|
|
||||||
supports_check_mode=supports_check_mode,
|
|
||||||
mutually_exclusive=mutually_exclusive_params,
|
|
||||||
required_together=required_together_params,
|
|
||||||
required_if=required_if,
|
|
||||||
required_one_of=required_one_of,
|
|
||||||
add_file_common_args=add_file_common_args
|
|
||||||
)
|
|
||||||
|
|
||||||
self.check_mode = self.module.check_mode
|
|
||||||
self._connect_params = self._get_connect_params()
|
|
||||||
|
|
||||||
if 'transport' not in self.module.params or self.module.params['transport'] != 'cli':
|
|
||||||
try:
|
|
||||||
self.api = self._get_mgmt_root(
|
|
||||||
f5_product_name, **self._connect_params
|
|
||||||
)
|
|
||||||
except iControlUnexpectedHTTPError as exc:
|
|
||||||
self.fail(str(exc))
|
|
||||||
|
|
||||||
def fail(self, msg):
|
|
||||||
self.module.fail_json(msg=msg)
|
|
||||||
|
|
||||||
def _get_connect_params(self):
|
|
||||||
params = dict(
|
|
||||||
user=self.module.params['user'],
|
|
||||||
password=self.module.params['password'],
|
|
||||||
server=self.module.params['server'],
|
|
||||||
server_port=self.module.params['server_port'],
|
|
||||||
validate_certs=self.module.params['validate_certs']
|
|
||||||
)
|
|
||||||
return params
|
|
||||||
|
|
||||||
def _get_mgmt_root(self, type, **kwargs):
|
|
||||||
if type == 'bigip':
|
|
||||||
return BigIpMgmt(
|
|
||||||
kwargs['server'],
|
|
||||||
kwargs['user'],
|
|
||||||
kwargs['password'],
|
|
||||||
port=kwargs['server_port'],
|
|
||||||
token='tmos'
|
|
||||||
)
|
|
||||||
elif type == 'iworkflow':
|
|
||||||
return iWorkflowMgmt(
|
|
||||||
kwargs['server'],
|
|
||||||
kwargs['user'],
|
|
||||||
kwargs['password'],
|
|
||||||
port=kwargs['server_port'],
|
|
||||||
token='local'
|
|
||||||
)
|
|
||||||
elif type == 'bigiq':
|
|
||||||
return BigIqMgmt(
|
|
||||||
kwargs['server'],
|
|
||||||
kwargs['user'],
|
|
||||||
kwargs['password'],
|
|
||||||
port=kwargs['server_port'],
|
|
||||||
auth_provider='local'
|
|
||||||
)
|
|
||||||
|
|
||||||
def reconnect(self):
|
|
||||||
"""Attempts to reconnect to a device
|
|
||||||
|
|
||||||
The existing token from a ManagementRoot can become invalid if you,
|
|
||||||
for example, upgrade the device (such as is done in the *_software
|
|
||||||
module).
|
|
||||||
|
|
||||||
This method can be used to reconnect to a remote device without
|
|
||||||
having to re-instantiate the ArgumentSpec and AnsibleF5Client classes;
|
|
||||||
it will use the same values that were initially provided to those
|
|
||||||
classes.
|
|
||||||
|
|
||||||
:return:
|
|
||||||
:raises iControlUnexpectedHTTPError
|
|
||||||
"""
|
|
||||||
self.api = self._get_mgmt_root(
|
|
||||||
self.f5_product_name, **self._connect_params
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class AnsibleF5Parameters(object):
|
|
||||||
def __init__(self, params=None):
|
|
||||||
self._values = defaultdict(lambda: None)
|
|
||||||
self._values['__warnings'] = []
|
|
||||||
if params:
|
|
||||||
self.update(params=params)
|
|
||||||
|
|
||||||
def update(self, params=None):
|
|
||||||
if params:
|
|
||||||
for k, v in iteritems(params):
|
|
||||||
if self.api_map is not None and k in self.api_map:
|
|
||||||
dict_to_use = self.api_map
|
|
||||||
map_key = self.api_map[k]
|
|
||||||
else:
|
|
||||||
dict_to_use = self._values
|
|
||||||
map_key = k
|
|
||||||
|
|
||||||
# Handle weird API parameters like `dns.proxy.__iter__` by
|
|
||||||
# using a map provided by the module developer
|
|
||||||
class_attr = getattr(type(self), map_key, None)
|
|
||||||
if isinstance(class_attr, property):
|
|
||||||
# There is a mapped value for the api_map key
|
|
||||||
if class_attr.fset is None:
|
|
||||||
# If the mapped value does not have an associated setter
|
|
||||||
self._values[map_key] = v
|
|
||||||
else:
|
|
||||||
# The mapped value has a setter
|
|
||||||
setattr(self, map_key, v)
|
|
||||||
else:
|
|
||||||
# If the mapped value is not a @property
|
|
||||||
self._values[map_key] = v
|
|
||||||
|
|
||||||
def __getattr__(self, item):
|
|
||||||
# Ensures that properties that weren't defined, and therefore stashed
|
|
||||||
# in the `_values` dict, will be retrievable.
|
|
||||||
return self._values[item]
|
|
||||||
|
|
||||||
@property
|
|
||||||
def partition(self):
|
|
||||||
if self._values['partition'] is None:
|
|
||||||
return 'Common'
|
|
||||||
return self._values['partition'].strip('/')
|
|
||||||
|
|
||||||
@partition.setter
|
|
||||||
def partition(self, value):
|
|
||||||
self._values['partition'] = value
|
|
||||||
|
|
||||||
def _filter_params(self, params):
|
|
||||||
return dict((k, v) for k, v in iteritems(params) if v is not None)
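# Illustrative sketch, not part of the original code: a module-specific
# parameter class using the api_map/property machinery implemented in
# AnsibleF5Parameters.update() above. 'dns.proxy.__iter__' is the API key
# mentioned in the comment there; the 'dns_proxies' attribute name is a
# hypothetical example.
class ExampleParameters(AnsibleF5Parameters):
    api_map = {'dns.proxy.__iter__': 'dns_proxies'}

    @property
    def dns_proxies(self):
        return self._values['dns_proxies']

    @dns_proxies.setter
    def dns_proxies(self, value):
        self._values['dns_proxies'] = value

# ExampleParameters(params={'dns.proxy.__iter__': ['10.0.0.53']}).dns_proxies
# -> ['10.0.0.53']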
|
|
||||||
|
|
||||||
|
|
||||||
class F5ModuleError(Exception):
|
|
||||||
pass
|
|
|
@ -1,153 +0,0 @@
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
import json
|
|
||||||
|
|
||||||
from ansible.module_utils.urls import fetch_url
|
|
||||||
|
|
||||||
|
|
||||||
AXAPI_PORT_PROTOCOLS = {
|
|
||||||
'tcp': 2,
|
|
||||||
'udp': 3,
|
|
||||||
}
|
|
||||||
|
|
||||||
AXAPI_VPORT_PROTOCOLS = {
|
|
||||||
'tcp': 2,
|
|
||||||
'udp': 3,
|
|
||||||
'fast-http': 9,
|
|
||||||
'http': 11,
|
|
||||||
'https': 12,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def a10_argument_spec():
|
|
||||||
return dict(
|
|
||||||
host=dict(type='str', required=True),
|
|
||||||
username=dict(type='str', aliases=['user', 'admin'], required=True),
|
|
||||||
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
|
|
||||||
write_config=dict(type='bool', default=False)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_failure(result):
|
|
||||||
if 'response' in result and result['response'].get('status') == 'fail':
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_call(module, url, post=None):
|
|
||||||
'''
|
|
||||||
Returns a datastructure based on the result of the API call
|
|
||||||
'''
|
|
||||||
rsp, info = fetch_url(module, url, data=post)
|
|
||||||
if not rsp or info['status'] >= 400:
|
|
||||||
module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
|
|
||||||
try:
|
|
||||||
raw_data = rsp.read()
|
|
||||||
data = json.loads(raw_data)
|
|
||||||
except ValueError:
|
|
||||||
# at least one API call (system.action.write_config) returns
|
|
||||||
# XML even when JSON is requested, so do some minimal handling
|
|
||||||
# here to prevent failing even when the call succeeded
|
|
||||||
if 'status="ok"' in raw_data.lower():
|
|
||||||
data = {"response": {"status": "OK"}}
|
|
||||||
else:
|
|
||||||
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
|
|
||||||
except Exception:
|
|
||||||
module.fail_json(msg="could not read the result from the host")
|
|
||||||
finally:
|
|
||||||
rsp.close()
|
|
||||||
return data
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_authenticate(module, base_url, username, password):
|
|
||||||
url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
|
|
||||||
result = axapi_call(module, url)
|
|
||||||
if axapi_failure(result):
|
|
||||||
return module.fail_json(msg=result['response']['err']['msg'])
|
|
||||||
sessid = result['session_id']
|
|
||||||
return base_url + '&session_id=' + sessid
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_authenticate_v3(module, base_url, username, password):
|
|
||||||
url = base_url
|
|
||||||
auth_payload = {"credentials": {"username": username, "password": password}}
|
|
||||||
result = axapi_call_v3(module, url, method='POST', body=json.dumps(auth_payload))
|
|
||||||
if axapi_failure(result):
|
|
||||||
return module.fail_json(msg=result['response']['err']['msg'])
|
|
||||||
signature = result['authresponse']['signature']
|
|
||||||
return signature
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_call_v3(module, url, method=None, body=None, signature=None):
|
|
||||||
'''
|
|
||||||
Returns a datastructure based on the result of the API call
|
|
||||||
'''
|
|
||||||
if signature:
|
|
||||||
headers = {'content-type': 'application/json', 'Authorization': 'A10 %s' % signature}
|
|
||||||
else:
|
|
||||||
headers = {'content-type': 'application/json'}
|
|
||||||
rsp, info = fetch_url(module, url, method=method, data=body, headers=headers)
|
|
||||||
if not rsp or info['status'] >= 400:
|
|
||||||
module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
|
|
||||||
try:
|
|
||||||
raw_data = rsp.read()
|
|
||||||
data = json.loads(raw_data)
|
|
||||||
except ValueError:
|
|
||||||
# at least one API call (system.action.write_config) returns
|
|
||||||
# XML even when JSON is requested, so do some minimal handling
|
|
||||||
# here to prevent failing even when the call succeeded
|
|
||||||
if 'status="ok"' in raw_data.lower():
|
|
||||||
data = {"response": {"status": "OK"}}
|
|
||||||
else:
|
|
||||||
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
|
|
||||||
except Exception:
|
|
||||||
module.fail_json(msg="could not read the result from the host")
|
|
||||||
finally:
|
|
||||||
rsp.close()
|
|
||||||
return data
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_enabled_disabled(flag):
|
|
||||||
'''
|
|
||||||
The axapi uses 0/1 integer values for flags, rather than strings
|
|
||||||
or booleans, so convert the given flag to a 0 or 1. For now, params
|
|
||||||
are specified as strings only, so that's what we check.
|
|
||||||
'''
|
|
||||||
if flag == 'enabled':
|
|
||||||
return 1
|
|
||||||
else:
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_get_port_protocol(protocol):
|
|
||||||
return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)
|
|
||||||
|
|
||||||
|
|
||||||
def axapi_get_vport_protocol(protocol):
|
|
||||||
return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
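# Illustrative sketch, not part of the original code: how the lookup helpers
# above translate protocol names into the integer codes the aXAPI expects.
_EXAMPLE_PROTOCOLS = {
    'port': axapi_get_port_protocol('TCP'),       # -> 2
    'vport': axapi_get_vport_protocol('https'),   # -> 12
    'unknown': axapi_get_vport_protocol('sctp'),  # -> None (unsupported)
}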
|
|
|
@ -1,129 +0,0 @@
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# (c) 2016 Red Hat Inc.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
#
|
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
from ansible.module_utils.basic import env_fallback
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
|
|
||||||
from ansible.module_utils.connection import exec_command
|
|
||||||
|
|
||||||
_DEVICE_CONFIGS = {}
|
|
||||||
|
|
||||||
aireos_provider_spec = {
|
|
||||||
'host': dict(),
|
|
||||||
'port': dict(type='int'),
|
|
||||||
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
|
|
||||||
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
|
|
||||||
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
|
|
||||||
'timeout': dict(type='int'),
|
|
||||||
}
|
|
||||||
aireos_argument_spec = {
|
|
||||||
'provider': dict(type='dict', options=aireos_provider_spec)
|
|
||||||
}
|
|
||||||
|
|
||||||
aireos_top_spec = {
|
|
||||||
'host': dict(removed_in_version=2.9),
|
|
||||||
'port': dict(removed_in_version=2.9, type='int'),
|
|
||||||
'username': dict(removed_in_version=2.9),
|
|
||||||
'password': dict(removed_in_version=2.9, no_log=True),
|
|
||||||
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
|
|
||||||
'timeout': dict(removed_in_version=2.9, type='int'),
|
|
||||||
}
|
|
||||||
aireos_argument_spec.update(aireos_top_spec)
|
|
||||||
|
|
||||||
|
|
||||||
def sanitize(resp):
|
|
||||||
# Takes response from device and strips whitespace from all lines
|
|
||||||
# AireOS adds extra leading whitespace, which netcfg would parse as a parent/child hierarchy that AireOS does not actually use
|
|
||||||
# Aireos also adds in trailing whitespace that is unused
|
|
||||||
cleaned = []
|
|
||||||
for line in resp.splitlines():
|
|
||||||
cleaned.append(line.strip())
|
|
||||||
return '\n'.join(cleaned).strip()
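# Illustrative sketch, not part of the original code: the raw output below is
# hypothetical and only shows the per-line stripping that sanitize() performs.
_EXAMPLE_RAW = '   interface address management 10.0.0.10   \n   sysname WLC01  \n'
_EXAMPLE_CLEAN = sanitize(_EXAMPLE_RAW)
# -> 'interface address management 10.0.0.10\nsysname WLC01'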
|
|
||||||
|
|
||||||
|
|
||||||
def get_provider_argspec():
|
|
||||||
return aireos_provider_spec
|
|
||||||
|
|
||||||
|
|
||||||
def check_args(module, warnings):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_config(module, flags=None):
|
|
||||||
flags = [] if flags is None else flags
|
|
||||||
|
|
||||||
cmd = 'show run-config commands '
|
|
||||||
cmd += ' '.join(flags)
|
|
||||||
cmd = cmd.strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
return _DEVICE_CONFIGS[cmd]
|
|
||||||
except KeyError:
|
|
||||||
rc, out, err = exec_command(module, cmd)
|
|
||||||
if rc != 0:
|
|
||||||
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
|
|
||||||
cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
|
|
||||||
_DEVICE_CONFIGS[cmd] = cfg
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
|
|
||||||
def to_commands(module, commands):
|
|
||||||
spec = {
|
|
||||||
'command': dict(key=True),
|
|
||||||
'prompt': dict(),
|
|
||||||
'answer': dict()
|
|
||||||
}
|
|
||||||
transform = ComplexList(spec, module)
|
|
||||||
return transform(commands)
|
|
||||||
|
|
||||||
|
|
||||||
def run_commands(module, commands, check_rc=True):
|
|
||||||
responses = list()
|
|
||||||
commands = to_commands(module, to_list(commands))
|
|
||||||
for cmd in commands:
|
|
||||||
cmd = module.jsonify(cmd)
|
|
||||||
rc, out, err = exec_command(module, cmd)
|
|
||||||
if check_rc and rc != 0:
|
|
||||||
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
|
|
||||||
responses.append(sanitize(to_text(out, errors='surrogate_then_replace')))
|
|
||||||
return responses
|
|
||||||
|
|
||||||
|
|
||||||
def load_config(module, commands):
|
|
||||||
|
|
||||||
rc, out, err = exec_command(module, 'config')
|
|
||||||
if rc != 0:
|
|
||||||
module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))
|
|
||||||
|
|
||||||
for command in to_list(commands):
|
|
||||||
if command == 'end':
|
|
||||||
continue
|
|
||||||
rc, out, err = exec_command(module, command)
|
|
||||||
if rc != 0:
|
|
||||||
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
|
|
||||||
|
|
||||||
exec_command(module, 'end')
|
|
|
@ -1,180 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2017 Apstra Inc, <community@apstra.com>
|
|
||||||
#
|
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
This module adds shared support for Apstra AOS modules
|
|
||||||
|
|
||||||
In order to use this module, include it as part of your module
|
|
||||||
|
|
||||||
from ansible.module_utils.network.aos.aos import (check_aos_version, get_aos_session, find_collection_item,
|
|
||||||
content_to_dict, do_load_resource)
|
|
||||||
|
|
||||||
"""
|
|
||||||
import json
|
|
||||||
|
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
|
||||||
try:
|
|
||||||
import yaml
|
|
||||||
HAS_YAML = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_YAML = False
|
|
||||||
|
|
||||||
try:
|
|
||||||
from apstra.aosom.session import Session
|
|
||||||
|
|
||||||
HAS_AOS_PYEZ = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_AOS_PYEZ = False
|
|
||||||
|
|
||||||
from ansible.module_utils._text import to_native
|
|
||||||
|
|
||||||
|
|
||||||
def check_aos_version(module, min=False):
|
|
||||||
"""
|
|
||||||
Check if the library aos-pyez is present.
|
|
||||||
If provided, also check if the minimum version requirement is met
|
|
||||||
"""
|
|
||||||
if not HAS_AOS_PYEZ:
|
|
||||||
module.fail_json(msg='aos-pyez is not installed. Please see details '
|
|
||||||
'here: https://github.com/Apstra/aos-pyez')
|
|
||||||
|
|
||||||
elif min:
|
|
||||||
import apstra.aosom
|
|
||||||
AOS_PYEZ_VERSION = apstra.aosom.__version__
|
|
||||||
|
|
||||||
if LooseVersion(AOS_PYEZ_VERSION) < LooseVersion(min):
|
|
||||||
module.fail_json(msg='aos-pyez >= %s is required for this module' % min)
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def get_aos_session(module, auth):
|
|
||||||
"""
|
|
||||||
Resume an existing session and return an AOS object.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
auth (dict): An AOS session as obtained by aos_login module blocks::
|
|
||||||
|
|
||||||
dict( token=<token>,
|
|
||||||
server=<ip>,
|
|
||||||
port=<port>
|
|
||||||
)
|
|
||||||
|
|
||||||
Return:
|
|
||||||
Aos object
|
|
||||||
"""
|
|
||||||
|
|
||||||
check_aos_version(module)
|
|
||||||
|
|
||||||
aos = Session()
|
|
||||||
aos.session = auth
|
|
||||||
|
|
||||||
return aos
|
|
||||||
|
|
||||||
|
|
||||||
def find_collection_item(collection, item_name=False, item_id=False):
|
|
||||||
"""
|
|
||||||
Find collection_item based on name or id from a collection object
|
|
||||||
Both Collection_item and Collection Objects are provided by aos-pyez library
|
|
||||||
|
|
||||||
Return
|
|
||||||
collection_item: object corresponding to the collection type
|
|
||||||
"""
|
|
||||||
my_dict = None
|
|
||||||
|
|
||||||
if item_name:
|
|
||||||
my_dict = collection.find(label=item_name)
|
|
||||||
elif item_id:
|
|
||||||
my_dict = collection.find(uid=item_id)
|
|
||||||
|
|
||||||
if my_dict is None:
|
|
||||||
return collection['']
|
|
||||||
else:
|
|
||||||
return my_dict
|
|
||||||
|
|
||||||
|
|
||||||
def content_to_dict(module, content):
|
|
||||||
"""
|
|
||||||
Convert 'content' into a Python Dict based on 'content_format'
|
|
||||||
"""
|
|
||||||
|
|
||||||
# if not HAS_YAML:
|
|
||||||
# module.fail_json(msg="Python Library Yaml is not present, mandatory to use 'content'")
|
|
||||||
|
|
||||||
content_dict = None
|
|
||||||
|
|
||||||
# try:
|
|
||||||
# content_dict = json.loads(content.replace("\'", '"'))
|
|
||||||
# except:
|
|
||||||
# module.fail_json(msg="Unable to convert 'content' from JSON, please check if valid")
|
|
||||||
#
|
|
||||||
# elif format in ['yaml', 'var']:
|
|
||||||
|
|
||||||
try:
|
|
||||||
content_dict = yaml.safe_load(content)
|
|
||||||
|
|
||||||
if not isinstance(content_dict, dict):
|
|
||||||
raise Exception()
|
|
||||||
|
|
||||||
# Check if the dict is empty and fail if it is
|
|
||||||
if not content_dict:
|
|
||||||
raise Exception()
|
|
||||||
|
|
||||||
except Exception:
|
|
||||||
module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")
|
|
||||||
|
|
||||||
# replace the string with the dict
|
|
||||||
module.params['content'] = content_dict
|
|
||||||
|
|
||||||
return content_dict
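
# --- Illustrative sketch (not part of the original module) -----------------
# content_to_dict() above boils down to yaml.safe_load() plus a check that
# the result is a non-empty dict.  A standalone version of that validation,
# assuming PyYAML is installed (the same optional dependency used above);
# the sample keys are made up for illustration only.
import yaml


def parse_content(content):
    try:
        data = yaml.safe_load(content)
        if not isinstance(data, dict) or not data:
            raise ValueError("'content' did not parse to a non-empty dict")
    except yaml.YAMLError as exc:
        raise ValueError("unable to parse 'content': %s" % exc)
    return data


if __name__ == '__main__':
    print(parse_content("display_name: my-ip-pool\nsubnets:\n  - network: 192.168.59.0/24"))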
|
|
||||||
|
|
||||||
|
|
||||||
def do_load_resource(module, collection, name):
|
|
||||||
"""
|
|
||||||
Create a new object (collection.item) by loading a datastructure directly
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
item = find_collection_item(collection, name, '')
|
|
||||||
except Exception:
|
|
||||||
module.fail_json(msg="An error occurred while running 'find_collection_item'")
|
|
||||||
|
|
||||||
if item.exists:
|
|
||||||
module.exit_json(changed=False, name=item.name, id=item.id, value=item.value)
|
|
||||||
|
|
||||||
# If not in check mode, apply the changes
|
|
||||||
if not module.check_mode:
|
|
||||||
try:
|
|
||||||
item.datum = module.params['content']
|
|
||||||
item.write()
|
|
||||||
except Exception as e:
|
|
||||||
module.fail_json(msg="Unable to write item content : %r" % to_native(e))
|
|
||||||
|
|
||||||
module.exit_json(changed=True, name=item.name, id=item.id, value=item.value)
|
|
|
@ -1,113 +0,0 @@
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by
|
|
||||||
# Ansible still belong to the author of the module, and may assign their own
|
|
||||||
# license to the complete work.
|
|
||||||
#
|
|
||||||
# Copyright (C) 2019 APCON, Inc.
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without
|
|
||||||
# modification, are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
||||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
|
||||||
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
||||||
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
||||||
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
||||||
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
||||||
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
# POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
#
|
|
||||||
# Contains utility methods
|
|
||||||
# APCON Networking
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import EntityCollection
|
|
||||||
from ansible.module_utils.connection import Connection, exec_command
|
|
||||||
from ansible.module_utils.connection import ConnectionError
|
|
||||||
|
|
||||||
_DEVICE_CONFIGS = {}
|
|
||||||
_CONNECTION = None
|
|
||||||
|
|
||||||
|
|
||||||
command_spec = {
|
|
||||||
'command': dict(key=True),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def check_args(module, warnings):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_connection(module):
|
|
||||||
global _CONNECTION
|
|
||||||
if _CONNECTION:
|
|
||||||
return _CONNECTION
|
|
||||||
_CONNECTION = Connection(module._socket_path)
|
|
||||||
|
|
||||||
return _CONNECTION
|
|
||||||
|
|
||||||
|
|
||||||
def get_config(module, flags=None):
|
|
||||||
flags = [] if flags is None else flags
|
|
||||||
|
|
||||||
cmd = ' '.join(flags).strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
return _DEVICE_CONFIGS[cmd]
|
|
||||||
except KeyError:
|
|
||||||
conn = get_connection(module)
|
|
||||||
out = conn.get(cmd)
|
|
||||||
cfg = to_text(out, errors='surrogate_then_replace').strip()
|
|
||||||
_DEVICE_CONFIGS[cmd] = cfg
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
|
|
||||||
def run_commands(module, commands, check_rc=True):
|
|
||||||
connection = get_connection(module)
|
|
||||||
transform = EntityCollection(module, command_spec)
|
|
||||||
commands = transform(commands)
|
|
||||||
|
|
||||||
responses = list()
|
|
||||||
|
|
||||||
for cmd in commands:
|
|
||||||
out = connection.get(**cmd)
|
|
||||||
responses.append(to_text(out, errors='surrogate_then_replace'))
|
|
||||||
|
|
||||||
return responses
|
|
||||||
|
|
||||||
|
|
||||||
def load_config(module, config):
|
|
||||||
try:
|
|
||||||
conn = get_connection(module)
|
|
||||||
conn.edit_config(config)
|
|
||||||
except ConnectionError as exc:
|
|
||||||
module.fail_json(msg=to_text(exc))
|
|
||||||
|
|
||||||
|
|
||||||
def get_defaults_flag(module):
|
|
||||||
rc, out, err = exec_command(module, 'display running-config ?')
|
|
||||||
out = to_text(out, errors='surrogate_then_replace')
|
|
||||||
|
|
||||||
commands = set()
|
|
||||||
for line in out.splitlines():
|
|
||||||
if line:
|
|
||||||
commands.add(line.strip().split()[0])
|
|
||||||
|
|
||||||
if 'all' in commands:
|
|
||||||
return 'all'
|
|
||||||
else:
|
|
||||||
return 'full'
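
# --- Illustrative sketch (not part of the original module) -----------------
# get_defaults_flag() above decides between 'all' and 'full' by reading the
# help text of 'display running-config ?' and collecting the first word of
# every non-empty line.  The same parsing applied to a hand-written sample
# output (the sample text is made up for illustration only):
def pick_defaults_flag(help_text):
    commands = set()
    for line in help_text.splitlines():
        if line:
            commands.add(line.strip().split()[0])
    return 'all' if 'all' in commands else 'full'


if __name__ == '__main__':
    sample = "  all    Display with defaults\n  full   Display full configuration\n"
    print(pick_defaults_flag(sample))   # -> 'all'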
|
|
|
@ -1,131 +0,0 @@
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# (c) 2016 Red Hat Inc.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
#
|
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
from ansible.module_utils.basic import env_fallback
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
|
|
||||||
from ansible.module_utils.connection import exec_command
|
|
||||||
|
|
||||||
_DEVICE_CONFIGS = {}
|
|
||||||
|
|
||||||
aruba_provider_spec = {
|
|
||||||
'host': dict(),
|
|
||||||
'port': dict(type='int'),
|
|
||||||
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
|
|
||||||
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
|
|
||||||
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
|
|
||||||
'timeout': dict(type='int'),
|
|
||||||
}
|
|
||||||
aruba_argument_spec = {
|
|
||||||
'provider': dict(type='dict', options=aruba_provider_spec)
|
|
||||||
}
|
|
||||||
|
|
||||||
aruba_top_spec = {
|
|
||||||
'host': dict(removed_in_version=2.9),
|
|
||||||
'port': dict(removed_in_version=2.9, type='int'),
|
|
||||||
'username': dict(removed_in_version=2.9),
|
|
||||||
'password': dict(removed_in_version=2.9, no_log=True),
|
|
||||||
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
|
|
||||||
'timeout': dict(removed_in_version=2.9, type='int'),
|
|
||||||
}
|
|
||||||
|
|
||||||
aruba_argument_spec.update(aruba_top_spec)
|
|
||||||
|
|
||||||
|
|
||||||
def get_provider_argspec():
|
|
||||||
return aruba_provider_spec
|
|
||||||
|
|
||||||
|
|
||||||
def check_args(module, warnings):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_config(module, flags=None):
|
|
||||||
flags = [] if flags is None else flags
|
|
||||||
|
|
||||||
cmd = 'show running-config '
|
|
||||||
cmd += ' '.join(flags)
|
|
||||||
cmd = cmd.strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
return _DEVICE_CONFIGS[cmd]
|
|
||||||
except KeyError:
|
|
||||||
rc, out, err = exec_command(module, cmd)
|
|
||||||
if rc != 0:
|
|
||||||
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
|
|
||||||
cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
|
|
||||||
_DEVICE_CONFIGS[cmd] = cfg
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
|
|
||||||
def sanitize(resp):
|
|
||||||
# Takes response from device and adjusts leading whitespace to just 1 space
|
|
||||||
cleaned = []
|
|
||||||
for line in resp.splitlines():
|
|
||||||
cleaned.append(re.sub(r"^\s+", " ", line))
|
|
||||||
return '\n'.join(cleaned).strip()
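
# --- Illustrative sketch (not part of the original module) -----------------
# sanitize() above collapses any run of leading whitespace to a single space
# so that netcfg sees a consistent one-level indentation.  Applied standalone
# to a made-up config fragment:
import re


def _collapse_indent(resp):
    return '\n'.join(re.sub(r"^\s+", " ", line) for line in resp.splitlines()).strip()


if __name__ == '__main__':
    raw = "interface mgmt\n        ip address 10.0.0.5/24\n        no shutdown\n"
    print(_collapse_indent(raw))
    # interface mgmt
    #  ip address 10.0.0.5/24
    #  no shutdown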
|
|
||||||
|
|
||||||
|
|
||||||
def to_commands(module, commands):
|
|
||||||
spec = {
|
|
||||||
'command': dict(key=True),
|
|
||||||
'prompt': dict(),
|
|
||||||
'answer': dict()
|
|
||||||
}
|
|
||||||
transform = ComplexList(spec, module)
|
|
||||||
return transform(commands)
|
|
||||||
|
|
||||||
|
|
||||||
def run_commands(module, commands, check_rc=True):
|
|
||||||
responses = list()
|
|
||||||
commands = to_commands(module, to_list(commands))
|
|
||||||
for cmd in commands:
|
|
||||||
cmd = module.jsonify(cmd)
|
|
||||||
rc, out, err = exec_command(module, cmd)
|
|
||||||
if check_rc and rc != 0:
|
|
||||||
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
|
|
||||||
responses.append(to_text(out, errors='surrogate_then_replace'))
|
|
||||||
return responses
|
|
||||||
|
|
||||||
|
|
||||||
def load_config(module, commands):
|
|
||||||
|
|
||||||
rc, out, err = exec_command(module, 'configure terminal')
|
|
||||||
if rc != 0:
|
|
||||||
module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))
|
|
||||||
|
|
||||||
for command in to_list(commands):
|
|
||||||
if command == 'end':
|
|
||||||
continue
|
|
||||||
rc, out, err = exec_command(module, command)
|
|
||||||
if rc != 0:
|
|
||||||
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
|
|
||||||
|
|
||||||
exec_command(module, 'end')
|
|
|
@ -1,572 +0,0 @@
|
||||||
from __future__ import absolute_import
|
|
||||||
|
|
||||||
"""
|
|
||||||
Created on Aug 16, 2016
|
|
||||||
|
|
||||||
@author: Gaurav Rastogi (grastogi@avinetworks.com)
|
|
||||||
"""
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import logging
|
|
||||||
import sys
|
|
||||||
from copy import deepcopy
|
|
||||||
from ansible.module_utils.basic import env_fallback
|
|
||||||
|
|
||||||
try:
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import (
|
|
||||||
ApiSession, ObjectNotFound, avi_sdk_syslog_logger, AviCredentials, HAS_AVI)
|
|
||||||
except ImportError:
|
|
||||||
HAS_AVI = False
|
|
||||||
|
|
||||||
|
|
||||||
if os.environ.get('AVI_LOG_HANDLER', '') != 'syslog':
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
else:
|
|
||||||
# Ansible does not allow logging from the modules.
|
|
||||||
log = avi_sdk_syslog_logger()
|
|
||||||
|
|
||||||
|
|
||||||
def _check_type_string(x):
|
|
||||||
"""
|
|
||||||
:param x:
|
|
||||||
:return: True if it is of type string
|
|
||||||
"""
|
|
||||||
if isinstance(x, str):
|
|
||||||
return True
|
|
||||||
if sys.version_info[0] < 3:
|
|
||||||
try:
|
|
||||||
return isinstance(x, unicode)
|
|
||||||
except NameError:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class AviCheckModeResponse(object):
|
|
||||||
"""
|
|
||||||
Class to support ansible check mode.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, obj, status_code=200):
|
|
||||||
self.obj = obj
|
|
||||||
self.status_code = status_code
|
|
||||||
|
|
||||||
def json(self):
|
|
||||||
return self.obj
|
|
||||||
|
|
||||||
|
|
||||||
def ansible_return(module, rsp, changed, req=None, existing_obj=None,
|
|
||||||
api_context=None):
|
|
||||||
"""
|
|
||||||
:param module: AnsibleModule
|
|
||||||
:param rsp: ApiResponse from avi_api
|
|
||||||
:param changed: boolean
|
|
||||||
:param req: ApiRequest to avi_api
|
|
||||||
:param existing_obj: object to be passed debug output
|
|
||||||
:param api_context: api login context
|
|
||||||
|
|
||||||
helper function to return the right ansible based on the error code and
|
|
||||||
changed
|
|
||||||
Returns: specific ansible module exit function
|
|
||||||
"""
|
|
||||||
|
|
||||||
if rsp is not None and rsp.status_code > 299:
|
|
||||||
return module.fail_json(
|
|
||||||
msg='Error %d Msg %s req: %s api_context:%s ' % (
|
|
||||||
rsp.status_code, rsp.text, req, api_context))
|
|
||||||
api_creds = AviCredentials()
|
|
||||||
api_creds.update_from_ansible_module(module)
|
|
||||||
key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
|
|
||||||
api_creds.port)
|
|
||||||
disable_fact = module.params.get('avi_disable_session_cache_as_fact')
|
|
||||||
|
|
||||||
fact_context = None
|
|
||||||
if not disable_fact:
|
|
||||||
fact_context = module.params.get('api_context', {})
|
|
||||||
if fact_context:
|
|
||||||
fact_context.update({key: api_context})
|
|
||||||
else:
|
|
||||||
fact_context = {key: api_context}
|
|
||||||
|
|
||||||
obj_val = rsp.json() if rsp else existing_obj
|
|
||||||
|
|
||||||
if (obj_val and module.params.get("obj_username", None) and
|
|
||||||
"username" in obj_val):
|
|
||||||
obj_val["obj_username"] = obj_val["username"]
|
|
||||||
if (obj_val and module.params.get("obj_password", None) and
|
|
||||||
"password" in obj_val):
|
|
||||||
obj_val["obj_password"] = obj_val["password"]
|
|
||||||
old_obj_val = existing_obj if changed and existing_obj else None
|
|
||||||
api_context_val = api_context if disable_fact else None
|
|
||||||
ansible_facts_val = dict(
|
|
||||||
avi_api_context=fact_context) if not disable_fact else {}
|
|
||||||
|
|
||||||
return module.exit_json(
|
|
||||||
changed=changed, obj=obj_val, old_obj=old_obj_val,
|
|
||||||
ansible_facts=ansible_facts_val, api_context=api_context_val)
|
|
||||||
|
|
||||||
|
|
||||||
def purge_optional_fields(obj, module):
|
|
||||||
"""
|
|
||||||
It purges the optional arguments to be sent to the controller.
|
|
||||||
:param obj: dictionary of the ansible object passed as argument.
|
|
||||||
:param module: AnsibleModule
|
|
||||||
return modified obj
|
|
||||||
"""
|
|
||||||
purge_fields = []
|
|
||||||
for param, spec in module.argument_spec.items():
|
|
||||||
if not spec.get('required', False):
|
|
||||||
if param not in obj:
|
|
||||||
# these are ansible common items
|
|
||||||
continue
|
|
||||||
if obj[param] is None:
|
|
||||||
purge_fields.append(param)
|
|
||||||
log.debug('purging fields %s', purge_fields)
|
|
||||||
for param in purge_fields:
|
|
||||||
obj.pop(param, None)
|
|
||||||
return obj
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup_absent_fields(obj):
|
|
||||||
"""
|
|
||||||
cleans up any field that is marked as state: absent. It needs to be removed
|
|
||||||
from the object if it is present.
|
|
||||||
:param obj:
|
|
||||||
:return: Purged object
|
|
||||||
"""
|
|
||||||
if type(obj) != dict:
|
|
||||||
return obj
|
|
||||||
cleanup_keys = []
|
|
||||||
for k, v in obj.items():
|
|
||||||
if type(v) == dict:
|
|
||||||
if (('state' in v and v['state'] == 'absent') or
|
|
||||||
(v == "{'state': 'absent'}")):
|
|
||||||
cleanup_keys.append(k)
|
|
||||||
else:
|
|
||||||
cleanup_absent_fields(v)
|
|
||||||
if not v:
|
|
||||||
cleanup_keys.append(k)
|
|
||||||
elif type(v) == list:
|
|
||||||
new_list = []
|
|
||||||
for elem in v:
|
|
||||||
elem = cleanup_absent_fields(elem)
|
|
||||||
if elem:
|
|
||||||
# keep the item only if it is non-empty
|
|
||||||
new_list.append(elem)
|
|
||||||
if new_list:
|
|
||||||
obj[k] = new_list
|
|
||||||
else:
|
|
||||||
cleanup_keys.append(k)
|
|
||||||
elif isinstance(v, str):
|
|
||||||
if v == "{'state': 'absent'}":
|
|
||||||
cleanup_keys.append(k)
|
|
||||||
for k in cleanup_keys:
|
|
||||||
del obj[k]
|
|
||||||
return obj
|
|
||||||
|
|
||||||
|
|
||||||
RE_REF_MATCH = re.compile(r'^/api/[\w/]+\?name\=[\w]+[^#<>]*$')
|
|
||||||
# if HTTP ref match then strip out the #name
|
|
||||||
HTTP_REF_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.+')
|
|
||||||
HTTP_REF_W_NAME_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.*#.+')
|
|
||||||
|
|
||||||
|
|
||||||
def ref_n_str_cmp(x, y):
|
|
||||||
"""
|
|
||||||
compares two references
|
|
||||||
1. check for exact reference
|
|
||||||
2. check for obj_type/uuid
|
|
||||||
3. check for name
|
|
||||||
|
|
||||||
if x is ref=name then extract uuid and name from y and use it.
|
|
||||||
if x is http_ref then
|
|
||||||
strip x and y
|
|
||||||
compare them.
|
|
||||||
|
|
||||||
if x and y are urls then match with split on #
|
|
||||||
if x is a RE_REF_MATCH then extract name
|
|
||||||
if y is a REF_MATCH then extract name
|
|
||||||
:param x: first string
|
|
||||||
:param y: second string from controller's object
|
|
||||||
|
|
||||||
Returns
|
|
||||||
True if they are equivalent else False
|
|
||||||
"""
|
|
||||||
if type(y) in (int, float, bool, complex):
|
|
||||||
y = str(y)
|
|
||||||
x = str(x)
|
|
||||||
if not (_check_type_string(x) and _check_type_string(y)):
|
|
||||||
return False
|
|
||||||
y_uuid = y_name = str(y)
|
|
||||||
x = str(x)
|
|
||||||
if RE_REF_MATCH.match(x):
|
|
||||||
x = x.split('name=')[1]
|
|
||||||
elif HTTP_REF_MATCH.match(x):
|
|
||||||
x = x.rsplit('#', 1)[0]
|
|
||||||
y = y.rsplit('#', 1)[0]
|
|
||||||
elif RE_REF_MATCH.match(y):
|
|
||||||
y = y.split('name=')[1]
|
|
||||||
|
|
||||||
if HTTP_REF_W_NAME_MATCH.match(y):
|
|
||||||
path = y.split('api/', 1)[1]
|
|
||||||
# Fetching name or uuid from path /xxxx_xx/xx/xx_x/uuid_or_name
|
|
||||||
uuid_or_name = path.split('/')[-1]
|
|
||||||
parts = uuid_or_name.rsplit('#', 1)
|
|
||||||
y_uuid = parts[0]
|
|
||||||
y_name = parts[1] if len(parts) > 1 else ''
|
|
||||||
# x is just a string but y is a URL, so match either the uuid or the name
|
|
||||||
result = (x in (y, y_name, y_uuid))
|
|
||||||
if not result:
|
|
||||||
log.debug('x: %s y: %s y_name %s y_uuid %s',
|
|
||||||
x, y, y_name, y_uuid)
|
|
||||||
return result
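
# --- Illustrative sketch (not part of the original module) -----------------
# ref_n_str_cmp() above accepts three spellings of the same reference:
#   1. a relative ref such as          /api/pool?name=web-pool
#   2. the controller's full URL like  https://10.0.0.1/api/pool/pool-uuid-1#web-pool
#   3. the bare name                   web-pool
# The simplified helper below (illustration only; it skips several corner
# cases the real function handles) extracts a comparable name from each
# form.  The pool name, UUID and address used here are made up.
def _ref_name(ref):
    if 'name=' in ref:
        return ref.split('name=')[1]
    if '#' in ref:
        return ref.rsplit('#', 1)[1]
    return ref


if __name__ == '__main__':
    forms = ['/api/pool?name=web-pool',
             'https://10.0.0.1/api/pool/pool-uuid-1#web-pool',
             'web-pool']
    assert len({_ref_name(f) for f in forms}) == 1
    print('all three forms resolve to', _ref_name(forms[0]))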
|
|
||||||
|
|
||||||
|
|
||||||
def avi_obj_cmp(x, y, sensitive_fields=None):
|
|
||||||
"""
|
|
||||||
compares whether x is fully contained in y. The comparison is different
|
|
||||||
from a simple dictionary compare for the following reasons
|
|
||||||
1. Some fields could be references. The object in controller returns the
|
|
||||||
full URL for those references. However, the ansible script would have
|
|
||||||
it specified as /api/pool?name=blah. So, the reference fields need
|
|
||||||
to match uuid, relative reference based on name and actual reference.
|
|
||||||
|
|
||||||
2. Optional fields with defaults: In case there are optional fields with
|
|
||||||
defaults then controller automatically fills it up. This would
|
|
||||||
cause the comparison with Ansible object specification to always return
|
|
||||||
changed.
|
|
||||||
|
|
||||||
3. Optional fields without defaults: This is most tricky. The issue is
|
|
||||||
how to specify deletion of such objects from ansible script. If the
|
|
||||||
ansible playbook has object specified as Null then Avi controller will
|
|
||||||
reject for non Message(dict) type fields. In addition, to deal with the
|
|
||||||
defaults=null issue all the fields that are set with None are purged
|
|
||||||
out before comparing with Avi controller's version
|
|
||||||
|
|
||||||
So, the solution is to pass state: absent if any optional field needs
|
|
||||||
to be deleted from the configuration. The script would return changed
|
|
||||||
=true if it finds a key in the controller version and it is marked with
|
|
||||||
state: absent in ansible playbook. Alternatively, it would return
|
|
||||||
false if the key is not present in the controller object. Before doing a
|
|
||||||
put or post it would purge the fields that are marked state: absent.
|
|
||||||
|
|
||||||
:param x: first string
|
|
||||||
:param y: second string from controller's object
|
|
||||||
:param sensitive_fields: sensitive fields to ignore for diff
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if x is subset of y else False
|
|
||||||
"""
|
|
||||||
if not sensitive_fields:
|
|
||||||
sensitive_fields = set()
|
|
||||||
if isinstance(x, str):
|
|
||||||
# Special handling for strings as they can be references.
|
|
||||||
return ref_n_str_cmp(x, y)
|
|
||||||
if type(x) not in [list, dict]:
|
|
||||||
# if it is not list or dict or string then simply compare the values
|
|
||||||
return x == y
|
|
||||||
if type(x) == list:
|
|
||||||
# should compare each item in the list and that should match
|
|
||||||
if len(x) != len(y):
|
|
||||||
log.debug('x has %d items y has %d', len(x), len(y))
|
|
||||||
return False
|
|
||||||
for i in zip(x, y):
|
|
||||||
if not avi_obj_cmp(i[0], i[1], sensitive_fields=sensitive_fields):
|
|
||||||
# no need to continue
|
|
||||||
return False
|
|
||||||
|
|
||||||
if type(x) == dict:
|
|
||||||
x.pop('_last_modified', None)
|
|
||||||
x.pop('tenant', None)
|
|
||||||
y.pop('_last_modified', None)
|
|
||||||
x.pop('api_version', None)
|
|
||||||
y.pop('api_version', None)
|
|
||||||
d_xks = [k for k in x.keys() if k in sensitive_fields]
|
|
||||||
|
|
||||||
if d_xks:
|
|
||||||
# if there is sensitive field then always return changed
|
|
||||||
return False
|
|
||||||
# pop the keys that are marked deleted but not present in y
|
|
||||||
# return false if item is marked absent and is present in y
|
|
||||||
d_x_absent_ks = []
|
|
||||||
for k, v in x.items():
|
|
||||||
if v is None:
|
|
||||||
d_x_absent_ks.append(k)
|
|
||||||
continue
|
|
||||||
if isinstance(v, dict):
|
|
||||||
if ('state' in v) and (v['state'] == 'absent'):
|
|
||||||
if type(y) == dict and k not in y:
|
|
||||||
d_x_absent_ks.append(k)
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
elif not v:
|
|
||||||
d_x_absent_ks.append(k)
|
|
||||||
elif isinstance(v, list) and not v:
|
|
||||||
d_x_absent_ks.append(k)
|
|
||||||
# Added condition to check key in dict.
|
|
||||||
elif isinstance(v, str) or (k in y and isinstance(y[k], str)):
|
|
||||||
# this is the case when ansible converts the dictionary into a
|
|
||||||
# string.
|
|
||||||
if v == "{'state': 'absent'}" and k not in y:
|
|
||||||
d_x_absent_ks.append(k)
|
|
||||||
elif not v and k not in y:
|
|
||||||
# this is the case when x has set the value that qualifies
|
|
||||||
# as not but y does not have that value
|
|
||||||
d_x_absent_ks.append(k)
|
|
||||||
for k in d_x_absent_ks:
|
|
||||||
x.pop(k)
|
|
||||||
x_keys = set(x.keys())
|
|
||||||
y_keys = set(y.keys())
|
|
||||||
if not x_keys.issubset(y_keys):
|
|
||||||
# log.debug('x has %s and y has %s keys', len(x_keys), len(y_keys))
|
|
||||||
return False
|
|
||||||
for k, v in x.items():
|
|
||||||
if k not in y:
|
|
||||||
# log.debug('k %s is not in y %s', k, y)
|
|
||||||
return False
|
|
||||||
if not avi_obj_cmp(v, y[k], sensitive_fields=sensitive_fields):
|
|
||||||
# log.debug('k %s v %s did not match in y %s', k, v, y[k])
|
|
||||||
return False
|
|
||||||
return True
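
# --- Illustrative sketch (not part of the original module) -----------------
# The core idea of avi_obj_cmp() above: the playbook object only has to be a
# *subset* of what the controller returns, because the controller fills in
# defaulted fields.  A stripped-down subset check (no reference or
# 'state: absent' handling, unlike the real function) on made-up data:
def is_subset(want, have):
    if isinstance(want, dict) and isinstance(have, dict):
        return all(k in have and is_subset(v, have[k]) for k, v in want.items())
    if isinstance(want, list) and isinstance(have, list):
        return len(want) == len(have) and all(is_subset(a, b) for a, b in zip(want, have))
    return want == have


if __name__ == '__main__':
    desired = {'name': 'vs-1', 'enabled': True}
    on_controller = {'name': 'vs-1', 'enabled': True, 'uuid': 'virtualservice-123',
                     'analytics_policy': {'enabled': False}}
    print('changed =', not is_subset(desired, on_controller))   # changed = False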
|
|
||||||
|
|
||||||
|
|
||||||
POP_FIELDS = ['state', 'controller', 'username', 'password', 'api_version',
|
|
||||||
'avi_credentials', 'avi_api_update_method', 'avi_api_patch_op',
|
|
||||||
'api_context', 'tenant', 'tenant_uuid', 'avi_disable_session_cache_as_fact']
|
|
||||||
|
|
||||||
|
|
||||||
def get_api_context(module, api_creds):
|
|
||||||
api_context = module.params.get('api_context')
|
|
||||||
if api_context and module.params.get('avi_disable_session_cache_as_fact'):
|
|
||||||
return api_context
|
|
||||||
elif api_context and not module.params.get(
|
|
||||||
'avi_disable_session_cache_as_fact'):
|
|
||||||
key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
|
|
||||||
api_creds.port)
|
|
||||||
return api_context.get(key)
|
|
||||||
else:
|
|
||||||
return None
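
# --- Illustrative sketch (not part of the original module) -----------------
# get_api_context() above looks cached sessions up under a key built from the
# controller address, user name and port, the same key ansible_return() uses
# when it stores the context back as a fact.  The key construction, shown
# with made-up credentials:
def _session_key(controller, username, port):
    return '%s:%s:%s' % (controller, username, port)


if __name__ == '__main__':
    cached_contexts = {
        _session_key('10.0.0.1', 'admin', 443): {'csrftoken': 'tok', 'session_id': 'sid'},
    }
    print(cached_contexts.get(_session_key('10.0.0.1', 'admin', 443)))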
|
|
||||||
|
|
||||||
|
|
||||||
def avi_ansible_api(module, obj_type, sensitive_fields):
|
|
||||||
"""
|
|
||||||
This converts the Ansible module into AVI object and invokes APIs
|
|
||||||
:param module: Ansible module
|
|
||||||
:param obj_type: string representing Avi object type
|
|
||||||
:param sensitive_fields: sensitive fields to be excluded for comparison
|
|
||||||
purposes.
|
|
||||||
Returns:
|
|
||||||
success: module.exit_json with obj=avi object
|
|
||||||
failure: module.fail_json
|
|
||||||
"""
|
|
||||||
|
|
||||||
api_creds = AviCredentials()
|
|
||||||
api_creds.update_from_ansible_module(module)
|
|
||||||
api_context = get_api_context(module, api_creds)
|
|
||||||
if api_context:
|
|
||||||
api = ApiSession.get_session(
|
|
||||||
api_creds.controller,
|
|
||||||
api_creds.username,
|
|
||||||
password=api_creds.password,
|
|
||||||
timeout=api_creds.timeout,
|
|
||||||
tenant=api_creds.tenant,
|
|
||||||
tenant_uuid=api_creds.tenant_uuid,
|
|
||||||
token=api_context['csrftoken'],
|
|
||||||
port=api_creds.port,
|
|
||||||
session_id=api_context['session_id'],
|
|
||||||
csrftoken=api_context['csrftoken'])
|
|
||||||
else:
|
|
||||||
api = ApiSession.get_session(
|
|
||||||
api_creds.controller,
|
|
||||||
api_creds.username,
|
|
||||||
password=api_creds.password,
|
|
||||||
timeout=api_creds.timeout,
|
|
||||||
tenant=api_creds.tenant,
|
|
||||||
tenant_uuid=api_creds.tenant_uuid,
|
|
||||||
token=api_creds.token,
|
|
||||||
port=api_creds.port)
|
|
||||||
state = module.params['state']
|
|
||||||
# Get the api version.
|
|
||||||
avi_update_method = module.params.get('avi_api_update_method', 'put')
|
|
||||||
avi_patch_op = module.params.get('avi_api_patch_op', 'add')
|
|
||||||
|
|
||||||
api_version = api_creds.api_version
|
|
||||||
name = module.params.get('name', None)
|
|
||||||
# Added Support to get uuid
|
|
||||||
uuid = module.params.get('uuid', None)
|
|
||||||
check_mode = module.check_mode
|
|
||||||
if uuid and obj_type != 'cluster':
|
|
||||||
obj_path = '%s/%s' % (obj_type, uuid)
|
|
||||||
else:
|
|
||||||
obj_path = '%s/' % obj_type
|
|
||||||
obj = deepcopy(module.params)
|
|
||||||
tenant = obj.pop('tenant', '')
|
|
||||||
tenant_uuid = obj.pop('tenant_uuid', '')
|
|
||||||
# obj.pop('cloud_ref', None)
|
|
||||||
for k in POP_FIELDS:
|
|
||||||
obj.pop(k, None)
|
|
||||||
purge_optional_fields(obj, module)
|
|
||||||
|
|
||||||
# Special code to handle situation where object has a field
|
|
||||||
# named username. This is used in case of api/user
|
|
||||||
# The following code copies the username and password
|
|
||||||
# from the obj_username and obj_password fields.
|
|
||||||
if 'obj_username' in obj:
|
|
||||||
obj['username'] = obj['obj_username']
|
|
||||||
obj.pop('obj_username')
|
|
||||||
if 'obj_password' in obj:
|
|
||||||
obj['password'] = obj['obj_password']
|
|
||||||
obj.pop('obj_password')
|
|
||||||
if 'full_name' not in obj and 'name' in obj and obj_type == "user":
|
|
||||||
obj['full_name'] = obj['name']
|
|
||||||
# Special case: name represents full_name in the user module
|
|
||||||
# As per the API response, name is always the same as username regardless of full_name
|
|
||||||
obj['name'] = obj['username']
|
|
||||||
|
|
||||||
log.info('passed object %s ', obj)
|
|
||||||
|
|
||||||
if uuid:
|
|
||||||
# Get the object based on uuid.
|
|
||||||
try:
|
|
||||||
existing_obj = api.get(
|
|
||||||
obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
params={'include_refs': '', 'include_name': ''},
|
|
||||||
api_version=api_version)
|
|
||||||
existing_obj = existing_obj.json()
|
|
||||||
except ObjectNotFound:
|
|
||||||
existing_obj = None
|
|
||||||
elif name:
|
|
||||||
params = {'include_refs': '', 'include_name': ''}
|
|
||||||
if obj.get('cloud_ref', None):
|
|
||||||
# this is the case when gets have to be scoped with cloud
|
|
||||||
cloud = obj['cloud_ref'].split('name=')[1]
|
|
||||||
params['cloud_ref.name'] = cloud
|
|
||||||
existing_obj = api.get_object_by_name(
|
|
||||||
obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
params=params, api_version=api_version)
|
|
||||||
|
|
||||||
# Need to check if tenant_ref was provided and the object returned
|
|
||||||
# is actually in admin tenant.
|
|
||||||
if existing_obj and 'tenant_ref' in obj and 'tenant_ref' in existing_obj:
|
|
||||||
# https://10.10.25.42/api/tenant/admin#admin
|
|
||||||
existing_obj_tenant = existing_obj['tenant_ref'].split('#')[1]
|
|
||||||
obj_tenant = obj['tenant_ref'].split('name=')[1]
|
|
||||||
if obj_tenant != existing_obj_tenant:
|
|
||||||
existing_obj = None
|
|
||||||
else:
|
|
||||||
# added api version to avi api call.
|
|
||||||
existing_obj = api.get(obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
params={'include_refs': '', 'include_name': ''},
|
|
||||||
api_version=api_version).json()
|
|
||||||
|
|
||||||
if state == 'absent':
|
|
||||||
rsp = None
|
|
||||||
changed = False
|
|
||||||
err = False
|
|
||||||
if not check_mode and existing_obj:
|
|
||||||
try:
|
|
||||||
if name is not None:
|
|
||||||
# added api version to avi api call.
|
|
||||||
rsp = api.delete_by_name(
|
|
||||||
obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
api_version=api_version)
|
|
||||||
else:
|
|
||||||
# added api version to avi api call.
|
|
||||||
rsp = api.delete(
|
|
||||||
obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
api_version=api_version)
|
|
||||||
except ObjectNotFound:
|
|
||||||
pass
|
|
||||||
if check_mode and existing_obj:
|
|
||||||
changed = True
|
|
||||||
|
|
||||||
if rsp:
|
|
||||||
if rsp.status_code == 204:
|
|
||||||
changed = True
|
|
||||||
else:
|
|
||||||
err = True
|
|
||||||
if not err:
|
|
||||||
return ansible_return(
|
|
||||||
module, rsp, changed, existing_obj=existing_obj,
|
|
||||||
api_context=api.get_context())
|
|
||||||
elif rsp:
|
|
||||||
return module.fail_json(msg=rsp.text)
|
|
||||||
|
|
||||||
rsp = None
|
|
||||||
req = None
|
|
||||||
if existing_obj:
|
|
||||||
# this is case of modify as object exists. should find out
|
|
||||||
# if changed is true or not
|
|
||||||
if name is not None and obj_type != 'cluster':
|
|
||||||
obj_uuid = existing_obj['uuid']
|
|
||||||
obj_path = '%s/%s' % (obj_type, obj_uuid)
|
|
||||||
if avi_update_method == 'put':
|
|
||||||
changed = not avi_obj_cmp(obj, existing_obj, sensitive_fields)
|
|
||||||
obj = cleanup_absent_fields(obj)
|
|
||||||
if changed:
|
|
||||||
req = obj
|
|
||||||
if check_mode:
|
|
||||||
# No need to process any further.
|
|
||||||
rsp = AviCheckModeResponse(obj=existing_obj)
|
|
||||||
else:
|
|
||||||
rsp = api.put(
|
|
||||||
obj_path, data=req, tenant=tenant,
|
|
||||||
tenant_uuid=tenant_uuid, api_version=api_version)
|
|
||||||
elif check_mode:
|
|
||||||
rsp = AviCheckModeResponse(obj=existing_obj)
|
|
||||||
else:
|
|
||||||
if check_mode:
|
|
||||||
# No need to process any further.
|
|
||||||
rsp = AviCheckModeResponse(obj=existing_obj)
|
|
||||||
changed = True
|
|
||||||
else:
|
|
||||||
obj.pop('name', None)
|
|
||||||
patch_data = {avi_patch_op: obj}
|
|
||||||
rsp = api.patch(
|
|
||||||
obj_path, data=patch_data, tenant=tenant,
|
|
||||||
tenant_uuid=tenant_uuid, api_version=api_version)
|
|
||||||
obj = rsp.json()
|
|
||||||
changed = not avi_obj_cmp(obj, existing_obj)
|
|
||||||
if changed:
|
|
||||||
log.debug('EXISTING OBJ %s', existing_obj)
|
|
||||||
log.debug('NEW OBJ %s', obj)
|
|
||||||
else:
|
|
||||||
changed = True
|
|
||||||
req = obj
|
|
||||||
if check_mode:
|
|
||||||
rsp = AviCheckModeResponse(obj=None)
|
|
||||||
else:
|
|
||||||
rsp = api.post(obj_type, data=obj, tenant=tenant,
|
|
||||||
tenant_uuid=tenant_uuid, api_version=api_version)
|
|
||||||
return ansible_return(module, rsp, changed, req, existing_obj=existing_obj,
|
|
||||||
api_context=api.get_context())
|
|
||||||
|
|
||||||
|
|
||||||
def avi_common_argument_spec():
|
|
||||||
"""
|
|
||||||
Returns common arguments for all Avi modules
|
|
||||||
:return: dict
|
|
||||||
"""
|
|
||||||
credentials_spec = dict(
|
|
||||||
controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
|
|
||||||
username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
|
|
||||||
password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
|
|
||||||
api_version=dict(default='16.4.4', type='str'),
|
|
||||||
tenant=dict(default='admin'),
|
|
||||||
tenant_uuid=dict(default='', type='str'),
|
|
||||||
port=dict(type='int'),
|
|
||||||
timeout=dict(default=300, type='int'),
|
|
||||||
token=dict(default='', type='str', no_log=True),
|
|
||||||
session_id=dict(default='', type='str', no_log=True),
|
|
||||||
csrftoken=dict(default='', type='str', no_log=True)
|
|
||||||
)
|
|
||||||
|
|
||||||
return dict(
|
|
||||||
controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
|
|
||||||
username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
|
|
||||||
password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
|
|
||||||
tenant=dict(default='admin'),
|
|
||||||
tenant_uuid=dict(default=''),
|
|
||||||
api_version=dict(default='16.4.4', type='str'),
|
|
||||||
avi_credentials=dict(default=None, type='dict',
|
|
||||||
options=credentials_spec),
|
|
||||||
api_context=dict(type='dict'),
|
|
||||||
avi_disable_session_cache_as_fact=dict(default=False, type='bool'))
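
# --- Illustrative sketch (not part of the original module) -----------------
# A module built on top of this helper typically merges the common Avi
# arguments with its own options before constructing AnsibleModule.  The
# dict-merge pattern, shown with a made-up module-specific spec (the
# AnsibleModule call itself is omitted so the snippet stays standalone):
def build_argument_spec(common_spec):
    argument_spec = dict(
        name=dict(type='str'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    argument_spec.update(common_spec)
    return argument_spec


if __name__ == '__main__':
    common = dict(controller=dict(), username=dict(), password=dict(no_log=True))
    print(sorted(build_argument_spec(common)))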
|
|
|
@ -1,38 +0,0 @@
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# Copyright (c), Gaurav Rastogi <grastogi@avinetworks.com>, 2017
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
# This module initially matched the namespace of network module avi. However,
|
|
||||||
# that causes namespace import error when other modules from avi namespaces
|
|
||||||
# are imported. Added import of absolute_import to avoid import collisions for
|
|
||||||
# avi.sdk.
|
|
||||||
|
|
||||||
from __future__ import absolute_import
|
|
||||||
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.avi.ansible_utils import (
|
|
||||||
avi_ansible_api, avi_common_argument_spec, ansible_return,
|
|
||||||
avi_obj_cmp, cleanup_absent_fields, AviCheckModeResponse, HAS_AVI)
|
|
|
@ -1,972 +0,0 @@
|
||||||
from __future__ import absolute_import
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import copy
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import time
|
|
||||||
from datetime import datetime, timedelta
|
|
||||||
from ssl import SSLError
|
|
||||||
|
|
||||||
|
|
||||||
class MockResponse(object):
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
raise Exception("Requests library Response object not found. Using fake one.")
|
|
||||||
|
|
||||||
|
|
||||||
class MockRequestsConnectionError(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class MockSession(object):
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
raise Exception("Requests library Session object not found. Using fake one.")
|
|
||||||
|
|
||||||
|
|
||||||
HAS_AVI = True
|
|
||||||
try:
|
|
||||||
from requests import ConnectionError as RequestsConnectionError
|
|
||||||
from requests import Response
|
|
||||||
from requests.sessions import Session
|
|
||||||
except ImportError:
|
|
||||||
HAS_AVI = False
|
|
||||||
Response = MockResponse
|
|
||||||
RequestsConnectionError = MockRequestsConnectionError
|
|
||||||
Session = MockSession
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
sessionDict = {}
|
|
||||||
|
|
||||||
|
|
||||||
def avi_timedelta(td):
|
|
||||||
'''
|
|
||||||
This is a wrapper function to work around the fact that the Python 2.6 builtin datetime.timedelta
|
|
||||||
does not have a total_seconds method
|
|
||||||
:param timedelta object
|
|
||||||
'''
|
|
||||||
if type(td) != timedelta:
|
|
||||||
raise TypeError()
|
|
||||||
if sys.version_info >= (2, 7):
|
|
||||||
ts = td.total_seconds()
|
|
||||||
else:
|
|
||||||
ts = td.seconds + (24 * 3600 * td.days)
|
|
||||||
return ts
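
# --- Illustrative sketch (not part of the original module) -----------------
# avi_timedelta() above only exists because Python 2.6 timedelta objects lack
# total_seconds(); on newer interpreters both branches compute the same value
# (microseconds aside, which the fallback formula ignores):
from datetime import timedelta

if __name__ == '__main__':
    td = timedelta(days=1, seconds=90)
    manual = td.seconds + (24 * 3600 * td.days)
    assert manual == int(td.total_seconds())
    print(manual)   # 86490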
|
|
||||||
|
|
||||||
|
|
||||||
def avi_sdk_syslog_logger(logger_name='avi.sdk'):
|
|
||||||
# The following sets up syslog module to log underlying avi SDK messages
|
|
||||||
# based on the environment variables:
|
|
||||||
# AVI_LOG_HANDLER: names the logging handler to use. Only syslog is
|
|
||||||
# supported.
|
|
||||||
# AVI_LOG_LEVEL: Logging level used for the avi SDK. Default is DEBUG
|
|
||||||
# AVI_SYSLOG_ADDRESS: Destination address for the syslog handler.
|
|
||||||
# Default is /dev/log
|
|
||||||
from logging.handlers import SysLogHandler
|
|
||||||
lf = '[%(asctime)s] %(levelname)s [%(module)s.%(funcName)s:%(lineno)d] %(message)s'
|
|
||||||
log = logging.getLogger(logger_name)
|
|
||||||
log_level = os.environ.get('AVI_LOG_LEVEL', 'DEBUG')
|
|
||||||
if log_level:
|
|
||||||
log.setLevel(getattr(logging, log_level))
|
|
||||||
formatter = logging.Formatter(lf)
|
|
||||||
sh = SysLogHandler(address=os.environ.get('AVI_SYSLOG_ADDRESS', '/dev/log'))
|
|
||||||
sh.setFormatter(formatter)
|
|
||||||
log.addHandler(sh)
|
|
||||||
return log
|
|
||||||
|
|
||||||
|
|
||||||
class ObjectNotFound(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class APIError(Exception):
|
|
||||||
def __init__(self, arg, rsp=None):
|
|
||||||
self.args = [arg, rsp]
|
|
||||||
self.rsp = rsp
|
|
||||||
|
|
||||||
|
|
||||||
class AviServerError(APIError):
|
|
||||||
def __init__(self, arg, rsp=None):
|
|
||||||
super(AviServerError, self).__init__(arg, rsp)
|
|
||||||
|
|
||||||
|
|
||||||
class APINotImplemented(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class ApiResponse(Response):
|
|
||||||
"""
|
|
||||||
Returns a copy of the requests.Response object and provides additional helper
|
|
||||||
routines
|
|
||||||
1. obj: returns dictionary of Avi Object
|
|
||||||
"""
|
|
||||||
def __init__(self, rsp):
|
|
||||||
super(ApiResponse, self).__init__()
|
|
||||||
for k, v in list(rsp.__dict__.items()):
|
|
||||||
setattr(self, k, v)
|
|
||||||
|
|
||||||
def json(self):
|
|
||||||
"""
|
|
||||||
Extends the session default json interface to handle special errors
|
|
||||||
and raise Exceptions
|
|
||||||
returns the Avi object as a dictionary from rsp.text
|
|
||||||
"""
|
|
||||||
if self.status_code in (200, 201):
|
|
||||||
if not self.text:
|
|
||||||
# In cases like status_code == 201 the response text could be
|
|
||||||
# empty string.
|
|
||||||
return None
|
|
||||||
return super(ApiResponse, self).json()
|
|
||||||
elif self.status_code == 204:
|
|
||||||
# No response needed; e.g., delete operation
|
|
||||||
return None
|
|
||||||
elif self.status_code == 404:
|
|
||||||
raise ObjectNotFound('HTTP Error: %s Error Msg %s' % (
|
|
||||||
self.status_code, self.text), self)
|
|
||||||
elif self.status_code >= 500:
|
|
||||||
raise AviServerError('HTTP Error: %s Error Msg %s' % (
|
|
||||||
self.status_code, self.text), self)
|
|
||||||
else:
|
|
||||||
raise APIError('HTTP Error: %s Error Msg %s' % (
|
|
||||||
self.status_code, self.text), self)
|
|
||||||
|
|
||||||
def count(self):
|
|
||||||
"""
|
|
||||||
return the number of objects in the collection response. If it is not
|
|
||||||
a collection response then it would simply return 1.
|
|
||||||
"""
|
|
||||||
obj = self.json()
|
|
||||||
if 'count' in obj:
|
|
||||||
# this was a response to a collection
|
|
||||||
return obj['count']
|
|
||||||
return 1
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def to_avi_response(resp):
|
|
||||||
if type(resp) == Response:
|
|
||||||
return ApiResponse(resp)
|
|
||||||
return resp
|
|
||||||
|
|
||||||
|
|
||||||
class AviCredentials(object):
|
|
||||||
controller = ''
|
|
||||||
username = ''
|
|
||||||
password = ''
|
|
||||||
api_version = '16.4.4'
|
|
||||||
tenant = None
|
|
||||||
tenant_uuid = None
|
|
||||||
token = None
|
|
||||||
port = None
|
|
||||||
timeout = 300
|
|
||||||
session_id = None
|
|
||||||
csrftoken = None
|
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
for k, v in kwargs.items():
|
|
||||||
setattr(self, k, v)
|
|
||||||
|
|
||||||
def update_from_ansible_module(self, m):
|
|
||||||
"""
|
|
||||||
:param m: ansible module
|
|
||||||
:return:
|
|
||||||
"""
|
|
||||||
if m.params.get('avi_credentials'):
|
|
||||||
for k, v in m.params['avi_credentials'].items():
|
|
||||||
if hasattr(self, k):
|
|
||||||
setattr(self, k, v)
|
|
||||||
if m.params['controller']:
|
|
||||||
self.controller = m.params['controller']
|
|
||||||
if m.params['username']:
|
|
||||||
self.username = m.params['username']
|
|
||||||
if m.params['password']:
|
|
||||||
self.password = m.params['password']
|
|
||||||
if (m.params['api_version'] and
|
|
||||||
(m.params['api_version'] != '16.4.4')):
|
|
||||||
self.api_version = m.params['api_version']
|
|
||||||
if m.params['tenant']:
|
|
||||||
self.tenant = m.params['tenant']
|
|
||||||
if m.params['tenant_uuid']:
|
|
||||||
self.tenant_uuid = m.params['tenant_uuid']
|
|
||||||
if m.params.get('session_id'):
|
|
||||||
self.session_id = m.params['session_id']
|
|
||||||
if m.params.get('csrftoken'):
|
|
||||||
self.csrftoken = m.params['csrftoken']
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return 'controller %s user %s api %s tenant %s' % (
|
|
||||||
self.controller, self.username, self.api_version, self.tenant)
|
|
||||||
|
|
||||||
|
|
||||||
class ApiSession(Session):
|
|
||||||
"""
|
|
||||||
Extends the Request library's session object to provide helper
|
|
||||||
utilities to work with Avi Controller like authentication, api massaging
|
|
||||||
etc.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# This keeps track of the process which created the cache.
|
|
||||||
# At anytime the pid of the process changes then it would create
|
|
||||||
# a new cache for that process.
|
|
||||||
AVI_SLUG = 'Slug'
|
|
||||||
SESSION_CACHE_EXPIRY = 20 * 60
|
|
||||||
SHARED_USER_HDRS = ['X-CSRFToken', 'Session-Id', 'Referer', 'Content-Type']
|
|
||||||
MAX_API_RETRIES = 3
|
|
||||||
|
|
||||||
def __init__(self, controller_ip=None, username=None, password=None,
|
|
||||||
token=None, tenant=None, tenant_uuid=None, verify=False,
|
|
||||||
port=None, timeout=60, api_version=None,
|
|
||||||
retry_conxn_errors=True, data_log=False,
|
|
||||||
avi_credentials=None, session_id=None, csrftoken=None,
|
|
||||||
lazy_authentication=False, max_api_retries=None):
|
|
||||||
"""
|
|
||||||
ApiSession takes ownership of avi_credentials and may update the
|
|
||||||
information inside it.
|
|
||||||
|
|
||||||
Initialize new session object with authenticated token from login api.
|
|
||||||
It also keeps a cache of user sessions that are cleaned up if inactive
|
|
||||||
for more than 20 mins.
|
|
||||||
|
|
||||||
Notes:
|
|
||||||
01. If mode is https and port is none or 443, we don't embed the
|
|
||||||
port in the prefix. The prefix would be 'https://ip'. If port
|
|
||||||
is a non-default value then we concatenate https://ip:port
|
|
||||||
in the prefix.
|
|
||||||
02. If mode is http and the port is none or 80, we don't embed the
|
|
||||||
port in the prefix. The prefix would be 'http://ip'. If port is
|
|
||||||
a non-default value, then we concatenate http://ip:port in
|
|
||||||
the prefix.
|
|
||||||
"""
|
|
||||||
super(ApiSession, self).__init__()
|
|
||||||
if not avi_credentials:
|
|
||||||
tenant = tenant if tenant else "admin"
|
|
||||||
self.avi_credentials = AviCredentials(
|
|
||||||
controller=controller_ip, username=username, password=password,
|
|
||||||
api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
token=token, port=port, timeout=timeout,
|
|
||||||
session_id=session_id, csrftoken=csrftoken)
|
|
||||||
else:
|
|
||||||
self.avi_credentials = avi_credentials
|
|
||||||
self.headers = {}
|
|
||||||
self.verify = verify
|
|
||||||
self.retry_conxn_errors = retry_conxn_errors
|
|
||||||
self.remote_api_version = {}
|
|
||||||
self.session_cookie_name = ''
|
|
||||||
self.user_hdrs = {}
|
|
||||||
self.data_log = data_log
|
|
||||||
self.num_session_retries = 0
|
|
||||||
self.retry_wait_time = 0
|
|
||||||
self.max_session_retries = (
|
|
||||||
self.MAX_API_RETRIES if max_api_retries is None
|
|
||||||
else int(max_api_retries))
|
|
||||||
# Refer Notes 01 and 02
|
|
||||||
k_port = port if port else 443
|
|
||||||
if self.avi_credentials.controller.startswith('http'):
|
|
||||||
k_port = 80 if not self.avi_credentials.port else k_port
|
|
||||||
if self.avi_credentials.port is None or self.avi_credentials.port\
|
|
||||||
== 80:
|
|
||||||
self.prefix = self.avi_credentials.controller
|
|
||||||
else:
|
|
||||||
self.prefix = '{x}:{y}'.format(
|
|
||||||
x=self.avi_credentials.controller,
|
|
||||||
y=self.avi_credentials.port)
|
|
||||||
else:
|
|
||||||
if port is None or port == 443:
|
|
||||||
self.prefix = 'https://{x}'.format(
|
|
||||||
x=self.avi_credentials.controller)
|
|
||||||
else:
|
|
||||||
self.prefix = 'https://{x}:{y}'.format(
|
|
||||||
x=self.avi_credentials.controller,
|
|
||||||
y=self.avi_credentials.port)
|
|
||||||
self.timeout = timeout
|
|
||||||
self.key = '%s:%s:%s' % (self.avi_credentials.controller,
|
|
||||||
self.avi_credentials.username, k_port)
|
|
||||||
# Added api token and session id to sessionDict for handle single
|
|
||||||
# session
|
|
||||||
if self.avi_credentials.csrftoken:
|
|
||||||
sessionDict[self.key] = {
|
|
||||||
'api': self,
|
|
||||||
"csrftoken": self.avi_credentials.csrftoken,
|
|
||||||
"session_id": self.avi_credentials.session_id,
|
|
||||||
"last_used": datetime.utcnow()
|
|
||||||
}
|
|
||||||
elif lazy_authentication:
|
|
||||||
sessionDict.get(self.key, {}).update(
|
|
||||||
{'api': self, "last_used": datetime.utcnow()})
|
|
||||||
else:
|
|
||||||
self.authenticate_session()
|
|
||||||
|
|
||||||
self.num_session_retries = 0
|
|
||||||
self.pid = os.getpid()
|
|
||||||
ApiSession._clean_inactive_sessions()
|
|
||||||
return
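
# --- Illustrative sketch (not part of the original module) -----------------
# The prefix rules from Notes 01 and 02 above, reduced to a standalone
# helper: default ports (443 for https, 80 for an explicit http://
# controller) are left out of the prefix, any other port is appended.
def build_prefix(controller, port=None):
    if controller.startswith('http'):
        return controller if port in (None, 80) else '%s:%s' % (controller, port)
    return 'https://%s' % controller if port in (None, 443) else 'https://%s:%s' % (controller, port)


if __name__ == '__main__':
    print(build_prefix('10.0.0.1'))                      # https://10.0.0.1
    print(build_prefix('10.0.0.1', 8443))                # https://10.0.0.1:8443
    print(build_prefix('http://controller.local', 80))   # http://controller.local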
|
|
||||||
|
|
||||||
@property
|
|
||||||
def controller_ip(self):
|
|
||||||
return self.avi_credentials.controller
|
|
||||||
|
|
||||||
@controller_ip.setter
|
|
||||||
def controller_ip(self, controller_ip):
|
|
||||||
self.avi_credentials.controller = controller_ip
|
|
||||||
|
|
||||||
@property
|
|
||||||
def username(self):
|
|
||||||
return self.avi_credentials.username
|
|
||||||
|
|
||||||
@property
|
|
||||||
def connected(self):
|
|
||||||
return sessionDict.get(self.key, {}).get('connected', False)
|
|
||||||
|
|
||||||
@username.setter
|
|
||||||
def username(self, username):
|
|
||||||
self.avi_credentials.username = username
|
|
||||||
|
|
||||||
@property
|
|
||||||
def password(self):
|
|
||||||
return self.avi_credentials.password
|
|
||||||
|
|
||||||
@password.setter
|
|
||||||
def password(self, password):
|
|
||||||
self.avi_credentials.password = password
|
|
||||||
|
|
||||||
@property
|
|
||||||
def keystone_token(self):
|
|
||||||
return sessionDict.get(self.key, {}).get('csrftoken', None)
|
|
||||||
|
|
||||||
@keystone_token.setter
|
|
||||||
def keystone_token(self, token):
|
|
||||||
sessionDict[self.key]['csrftoken'] = token
|
|
||||||
|
|
||||||
@property
|
|
||||||
def tenant_uuid(self):
|
|
||||||
return self.avi_credentials.tenant_uuid
|
|
||||||
|
|
||||||
@tenant_uuid.setter
|
|
||||||
def tenant_uuid(self, tenant_uuid):
|
|
||||||
self.avi_credentials.tenant_uuid = tenant_uuid
|
|
||||||
|
|
||||||
@property
|
|
||||||
def tenant(self):
|
|
||||||
return self.avi_credentials.tenant
|
|
||||||
|
|
||||||
@tenant.setter
|
|
||||||
def tenant(self, tenant):
|
|
||||||
if tenant:
|
|
||||||
self.avi_credentials.tenant = tenant
|
|
||||||
else:
|
|
||||||
self.avi_credentials.tenant = 'admin'
|
|
||||||
|
|
||||||
@property
|
|
||||||
def port(self):
|
|
||||||
return self.avi_credentials.port
|
|
||||||
|
|
||||||
@port.setter
|
|
||||||
def port(self, port):
|
|
||||||
self.avi_credentials.port = port
|
|
||||||
|
|
||||||
@property
|
|
||||||
def api_version(self):
|
|
||||||
return self.avi_credentials.api_version
|
|
||||||
|
|
||||||
@api_version.setter
|
|
||||||
def api_version(self, api_version):
|
|
||||||
self.avi_credentials.api_version = api_version
|
|
||||||
|
|
||||||
@property
|
|
||||||
def session_id(self):
|
|
||||||
return sessionDict[self.key]['session_id']
|
|
||||||
|
|
||||||
def get_context(self):
|
|
||||||
return {
|
|
||||||
'session_id': sessionDict[self.key]['session_id'],
|
|
||||||
'csrftoken': sessionDict[self.key]['csrftoken']
|
|
||||||
}
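# A hedged illustration (controller address and credentials below are
# placeholders, not part of this module): the context returned here can seed a
# second ApiSession so another caller reuses the cookies instead of logging in
# again, e.g.
#   ctx = api.get_context()
#   reuse = ApiSession.get_session('10.10.25.42', 'admin', tenant='admin',
#                                  csrftoken=ctx['csrftoken'],
#                                  session_id=ctx['session_id'],
#                                  lazy_authentication=True)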
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def clear_cached_sessions():
|
|
||||||
global sessionDict
|
|
||||||
sessionDict = {}
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_session(
|
|
||||||
controller_ip=None, username=None, password=None, token=None, tenant=None,
|
|
||||||
tenant_uuid=None, verify=False, port=None, timeout=60,
|
|
||||||
retry_conxn_errors=True, api_version=None, data_log=False,
|
|
||||||
avi_credentials=None, session_id=None, csrftoken=None,
|
|
||||||
lazy_authentication=False, max_api_retries=None):
|
|
||||||
"""
|
|
||||||
returns the session object for same user and tenant
|
|
||||||
calls init if the session does not exist and adds it to the session cache
|
|
||||||
:param controller_ip: controller IP address
|
|
||||||
:param username:
|
|
||||||
:param password:
|
|
||||||
:param token: Token to use; example, a valid keystone token
|
|
||||||
:param tenant: Name of the tenant on Avi Controller
|
|
||||||
:param tenant_uuid: UUID of the tenant; don't specify tenant when using tenant_uuid
|
|
||||||
:param port: Rest-API may use a different port other than 443
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param retry_conxn_errors: retry on connection errors
|
|
||||||
:param api_version: Controller API version
|
|
||||||
"""
|
|
||||||
if not avi_credentials:
|
|
||||||
tenant = tenant if tenant else "admin"
|
|
||||||
avi_credentials = AviCredentials(
|
|
||||||
controller=controller_ip, username=username, password=password,
|
|
||||||
api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
token=token, port=port, timeout=timeout,
|
|
||||||
session_id=session_id, csrftoken=csrftoken)
|
|
||||||
|
|
||||||
k_port = avi_credentials.port if avi_credentials.port else 443
|
|
||||||
if avi_credentials.controller.startswith('http'):
|
|
||||||
k_port = 80 if not avi_credentials.port else k_port
|
|
||||||
key = '%s:%s:%s' % (avi_credentials.controller,
|
|
||||||
avi_credentials.username, k_port)
|
|
||||||
cached_session = sessionDict.get(key)
|
|
||||||
if cached_session:
|
|
||||||
user_session = cached_session['api']
|
|
||||||
if not (user_session.avi_credentials.csrftoken or
|
|
||||||
lazy_authentication):
|
|
||||||
user_session.authenticate_session()
|
|
||||||
else:
|
|
||||||
user_session = ApiSession(
|
|
||||||
controller_ip, username, password, token=token, tenant=tenant,
|
|
||||||
tenant_uuid=tenant_uuid, verify=verify, port=port,
|
|
||||||
timeout=timeout, retry_conxn_errors=retry_conxn_errors,
|
|
||||||
api_version=api_version, data_log=data_log,
|
|
||||||
avi_credentials=avi_credentials,
|
|
||||||
lazy_authentication=lazy_authentication,
|
|
||||||
max_api_retries=max_api_retries)
|
|
||||||
ApiSession._clean_inactive_sessions()
|
|
||||||
return user_session
|
|
||||||
|
|
||||||
def reset_session(self):
|
|
||||||
"""
|
|
||||||
resets and re-authenticates the current session.
|
|
||||||
"""
|
|
||||||
sessionDict[self.key]['connected'] = False
|
|
||||||
logger.info('resetting session for %s', self.key)
|
|
||||||
self.user_hdrs = {}
|
|
||||||
for k, v in self.headers.items():
|
|
||||||
if k not in self.SHARED_USER_HDRS:
|
|
||||||
self.user_hdrs[k] = v
|
|
||||||
self.headers = {}
|
|
||||||
self.authenticate_session()
|
|
||||||
|
|
||||||
def authenticate_session(self):
|
|
||||||
"""
|
|
||||||
Performs session authentication with Avi controller and stores
|
|
||||||
session cookies and sets header options like tenant.
|
|
||||||
"""
|
|
||||||
body = {"username": self.avi_credentials.username}
|
|
||||||
if self.avi_credentials.password:
|
|
||||||
body["password"] = self.avi_credentials.password
|
|
||||||
elif self.avi_credentials.token:
|
|
||||||
body["token"] = self.avi_credentials.token
|
|
||||||
else:
|
|
||||||
raise APIError("Neither user password or token provided")
|
|
||||||
logger.debug('authenticating user %s prefix %s',
|
|
||||||
self.avi_credentials.username, self.prefix)
|
|
||||||
self.cookies.clear()
|
|
||||||
err = None
|
|
||||||
try:
|
|
||||||
rsp = super(ApiSession, self).post(
|
|
||||||
self.prefix + "/login", body, timeout=self.timeout, verify=self.verify)
|
|
||||||
|
|
||||||
if rsp.status_code == 200:
|
|
||||||
self.num_session_retries = 0
|
|
||||||
self.remote_api_version = rsp.json().get('version', {})
|
|
||||||
self.session_cookie_name = rsp.json().get('session_cookie_name', 'sessionid')
|
|
||||||
self.headers.update(self.user_hdrs)
|
|
||||||
if rsp.cookies and 'csrftoken' in rsp.cookies:
|
|
||||||
csrftoken = rsp.cookies['csrftoken']
|
|
||||||
sessionDict[self.key] = {
|
|
||||||
'csrftoken': csrftoken,
|
|
||||||
'session_id': rsp.cookies[self.session_cookie_name],
|
|
||||||
'last_used': datetime.utcnow(),
|
|
||||||
'api': self,
|
|
||||||
'connected': True
|
|
||||||
}
|
|
||||||
logger.debug("authentication success for user %s",
|
|
||||||
self.avi_credentials.username)
|
|
||||||
return
|
|
||||||
# Check for bad request and invalid credentials response code
|
|
||||||
elif rsp.status_code in [401, 403]:
|
|
||||||
logger.error('Status Code %s msg %s', rsp.status_code, rsp.text)
|
|
||||||
err = APIError('Status Code %s msg %s' % (
|
|
||||||
rsp.status_code, rsp.text), rsp)
|
|
||||||
raise err
|
|
||||||
else:
|
|
||||||
logger.error("Error status code %s msg %s", rsp.status_code,
|
|
||||||
rsp.text)
|
|
||||||
err = APIError('Status Code %s msg %s' % (
|
|
||||||
rsp.status_code, rsp.text), rsp)
|
|
||||||
except (RequestsConnectionError, SSLError) as e:
|
|
||||||
if not self.retry_conxn_errors:
|
|
||||||
raise
|
|
||||||
logger.warning('Connection error retrying %s', e)
|
|
||||||
err = e
|
|
||||||
# comes here only if there was an exception or the login was not
|
|
||||||
# successful
|
|
||||||
if self.retry_wait_time:
|
|
||||||
time.sleep(self.retry_wait_time)
|
|
||||||
self.num_session_retries += 1
|
|
||||||
if self.num_session_retries > self.max_session_retries:
|
|
||||||
self.num_session_retries = 0
|
|
||||||
logger.error("giving up after %d retries connection failure %s",
|
|
||||||
self.max_session_retries, True)
|
|
||||||
ret_err = (
|
|
||||||
err if err else APIError("giving up after %d retries connection failure %s" %
|
|
||||||
(self.max_session_retries, True)))
|
|
||||||
raise ret_err
|
|
||||||
self.authenticate_session()
|
|
||||||
return
|
|
||||||
|
|
||||||
def _get_api_headers(self, tenant, tenant_uuid, timeout, headers,
|
|
||||||
api_version):
|
|
||||||
"""
|
|
||||||
returns the headers that are passed to the requests.Session api calls.
|
|
||||||
"""
|
|
||||||
api_hdrs = copy.deepcopy(self.headers)
|
|
||||||
api_hdrs.update({
|
|
||||||
"Referer": self.prefix,
|
|
||||||
"Content-Type": "application/json"
|
|
||||||
})
|
|
||||||
api_hdrs['timeout'] = str(timeout)
|
|
||||||
if self.key in sessionDict and 'csrftoken' in sessionDict.get(self.key):
|
|
||||||
api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken']
|
|
||||||
else:
|
|
||||||
self.authenticate_session()
|
|
||||||
api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken']
|
|
||||||
if api_version:
|
|
||||||
api_hdrs['X-Avi-Version'] = api_version
|
|
||||||
elif self.avi_credentials.api_version:
|
|
||||||
api_hdrs['X-Avi-Version'] = self.avi_credentials.api_version
|
|
||||||
if tenant:
|
|
||||||
tenant_uuid = None
|
|
||||||
elif tenant_uuid:
|
|
||||||
tenant = None
|
|
||||||
else:
|
|
||||||
tenant = self.avi_credentials.tenant
|
|
||||||
tenant_uuid = self.avi_credentials.tenant_uuid
|
|
||||||
if tenant_uuid:
|
|
||||||
api_hdrs.update({"X-Avi-Tenant-UUID": "%s" % tenant_uuid})
|
|
||||||
api_hdrs.pop("X-Avi-Tenant", None)
|
|
||||||
elif tenant:
|
|
||||||
api_hdrs.update({"X-Avi-Tenant": "%s" % tenant})
|
|
||||||
api_hdrs.pop("X-Avi-Tenant-UUID", None)
|
|
||||||
# Override any user headers that were passed by users. We don't know
|
|
||||||
# when the user last updated the user_hdrs
|
|
||||||
if self.user_hdrs:
|
|
||||||
api_hdrs.update(self.user_hdrs)
|
|
||||||
if headers:
|
|
||||||
# overwrite the headers passed via the API calls.
|
|
||||||
api_hdrs.update(headers)
|
|
||||||
return api_hdrs
|
|
||||||
|
|
||||||
def _api(self, api_name, path, tenant, tenant_uuid, data=None,
|
|
||||||
headers=None, timeout=None, api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
It calls the requests.Session APIs and handles session expiry
|
|
||||||
and other situations where session needs to be reset.
|
|
||||||
returns ApiResponse object
|
|
||||||
:param path: takes relative path to the AVI api.
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param headers: dictionary of headers that override the session
|
|
||||||
headers.
|
|
||||||
"""
|
|
||||||
if self.pid != os.getpid():
|
|
||||||
logger.info('pid %d change detected new %d. Closing session',
|
|
||||||
self.pid, os.getpid())
|
|
||||||
self.close()
|
|
||||||
self.pid = os.getpid()
|
|
||||||
if timeout is None:
|
|
||||||
timeout = self.timeout
|
|
||||||
fullpath = self._get_api_path(path)
|
|
||||||
fn = getattr(super(ApiSession, self), api_name)
|
|
||||||
api_hdrs = self._get_api_headers(tenant, tenant_uuid, timeout, headers,
|
|
||||||
api_version)
|
|
||||||
connection_error = False
|
|
||||||
err = None
|
|
||||||
cookies = {
|
|
||||||
'csrftoken': api_hdrs['X-CSRFToken'],
|
|
||||||
}
|
|
||||||
try:
|
|
||||||
if self.session_cookie_name:
|
|
||||||
cookies[self.session_cookie_name] = sessionDict[self.key]['session_id']
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
try:
|
|
||||||
if data is not None and isinstance(data, dict):
|
|
||||||
resp = fn(fullpath, data=json.dumps(data), headers=api_hdrs,
|
|
||||||
timeout=timeout, cookies=cookies, **kwargs)
|
|
||||||
else:
|
|
||||||
resp = fn(fullpath, data=data, headers=api_hdrs,
|
|
||||||
timeout=timeout, cookies=cookies, **kwargs)
|
|
||||||
except (RequestsConnectionError, SSLError) as e:
|
|
||||||
logger.warning('Connection error retrying %s', e)
|
|
||||||
if not self.retry_conxn_errors:
|
|
||||||
raise
|
|
||||||
connection_error = True
|
|
||||||
err = e
|
|
||||||
except Exception as e:
|
|
||||||
logger.error('Error in Requests library %s', e)
|
|
||||||
raise
|
|
||||||
if not connection_error:
|
|
||||||
logger.debug('path: %s http_method: %s hdrs: %s params: '
|
|
||||||
'%s data: %s rsp: %s', fullpath, api_name.upper(),
|
|
||||||
api_hdrs, kwargs, data,
|
|
||||||
(resp.text if self.data_log else 'None'))
|
|
||||||
if connection_error or resp.status_code in (401, 419):
|
|
||||||
if connection_error:
|
|
||||||
try:
|
|
||||||
self.close()
|
|
||||||
except Exception:
|
|
||||||
# ignoring exception in cleanup path
|
|
||||||
pass
|
|
||||||
logger.warning('Connection failed, retrying.')
|
|
||||||
# Adding sleep before retrying
|
|
||||||
if self.retry_wait_time:
|
|
||||||
time.sleep(self.retry_wait_time)
|
|
||||||
else:
|
|
||||||
logger.info('received error %d %s so resetting connection',
|
|
||||||
resp.status_code, resp.text)
|
|
||||||
ApiSession.reset_session(self)
|
|
||||||
self.num_session_retries += 1
|
|
||||||
if self.num_session_retries > self.max_session_retries:
|
|
||||||
# Added this such that any code which re-tries can succeed
|
|
||||||
# eventually.
|
|
||||||
self.num_session_retries = 0
|
|
||||||
if not connection_error:
|
|
||||||
err = APIError('Status Code %s msg %s' % (
|
|
||||||
resp.status_code, resp.text), resp)
|
|
||||||
logger.error(
|
|
||||||
"giving up after %d retries conn failure %s err %s",
|
|
||||||
self.max_session_retries, connection_error, err)
|
|
||||||
ret_err = (
|
|
||||||
err if err else APIError("giving up after %d retries connection failure %s" %
|
|
||||||
(self.max_session_retries, True)))
|
|
||||||
raise ret_err
|
|
||||||
# should restore the updated_hdrs to one passed down
|
|
||||||
resp = self._api(api_name, path, tenant, tenant_uuid, data,
|
|
||||||
headers=headers, api_version=api_version,
|
|
||||||
timeout=timeout, **kwargs)
|
|
||||||
self.num_session_retries = 0
|
|
||||||
|
|
||||||
if resp.cookies and 'csrftoken' in resp.cookies:
|
|
||||||
csrftoken = resp.cookies['csrftoken']
|
|
||||||
self.headers.update({"X-CSRFToken": csrftoken})
|
|
||||||
self._update_session_last_used()
|
|
||||||
return ApiResponse.to_avi_response(resp)
|
|
||||||
|
|
||||||
def get_controller_details(self):
|
|
||||||
result = {
|
|
||||||
"controller_ip": self.controller_ip,
|
|
||||||
"controller_api_version": self.remote_api_version
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
|
|
||||||
def get(self, path, tenant='', tenant_uuid='', timeout=None, params=None,
|
|
||||||
api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
It extends the Session Library interface to add AVI API prefixes,
|
|
||||||
handle session exceptions related to authentication and update
|
|
||||||
the global user session cache.
|
|
||||||
:param path: takes relative path to the AVI api.
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
get method takes relative path to service and kwargs as per Session
|
|
||||||
class get method
|
|
||||||
returns session's response object
|
|
||||||
"""
|
|
||||||
return self._api('get', path, tenant, tenant_uuid, timeout=timeout,
|
|
||||||
params=params, api_version=api_version, **kwargs)
|
|
||||||
|
|
||||||
def get_object_by_name(self, path, name, tenant='', tenant_uuid='',
|
|
||||||
timeout=None, params=None, api_version=None,
|
|
||||||
**kwargs):
|
|
||||||
"""
|
|
||||||
Helper function to access Avi REST Objects using object
|
|
||||||
type and name. It behaves like python dictionary interface where it
|
|
||||||
returns None when the object is not present in the AviController.
|
|
||||||
Internally, it transforms the request to api/path?name=<name>...
|
|
||||||
:param path: relative path to service
|
|
||||||
:param name: name of the object
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
returns dictionary object if successful else None
|
|
||||||
"""
|
|
||||||
obj = None
|
|
||||||
if not params:
|
|
||||||
params = {}
|
|
||||||
params['name'] = name
|
|
||||||
resp = self.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
|
|
||||||
timeout=timeout,
|
|
||||||
params=params, api_version=api_version, **kwargs)
|
|
||||||
if resp.status_code in (401, 419):
|
|
||||||
ApiSession.reset_session(self)
|
|
||||||
resp = self.get_object_by_name(
|
|
||||||
path, name, tenant, tenant_uuid, timeout=timeout,
|
|
||||||
params=params, **kwargs)
|
|
||||||
if resp.status_code > 499 or 'Invalid version' in resp.text:
|
|
||||||
logger.error('Error in get object by name for %s named %s. '
|
|
||||||
'Error: %s', path, name, resp.text)
|
|
||||||
raise AviServerError(resp.text, rsp=resp)
|
|
||||||
elif resp.status_code > 299:
|
|
||||||
return obj
|
|
||||||
try:
|
|
||||||
if 'results' in resp.json():
|
|
||||||
obj = resp.json()['results'][0]
|
|
||||||
else:
|
|
||||||
# For apis returning single object eg. api/cluster
|
|
||||||
obj = resp.json()
|
|
||||||
except IndexError:
|
|
||||||
logger.warning('Warning: Object Not found for %s named %s',
|
|
||||||
path, name)
|
|
||||||
obj = None
|
|
||||||
self._update_session_last_used()
|
|
||||||
return obj
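# A hedged usage sketch (object type, name and credentials are illustrative
# placeholders): look an object up by name and treat a missing object as None
# rather than an error, e.g.
#   api = ApiSession.get_session('10.10.25.42', 'admin', 'password')
#   pool = api.get_object_by_name('pool', 'web-pool')
#   if pool is None:
#       # object does not exist on the controller
#       ...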
|
|
||||||
|
|
||||||
def post(self, path, data=None, tenant='', tenant_uuid='', timeout=None,
|
|
||||||
force_uuid=None, params=None, api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
It extends the Session Library interface to add AVI API prefixes,
|
|
||||||
handle session exceptions related to authentication and update
|
|
||||||
the global user session cache.
|
|
||||||
:param path: takes relative path to the AVI api. It is modified by
|
|
||||||
the library to conform to AVI Controller's REST API interface
|
|
||||||
:param data: dictionary of the data. Support for json string
|
|
||||||
is deprecated
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
returns session's response object
|
|
||||||
"""
|
|
||||||
if force_uuid is not None:
|
|
||||||
headers = kwargs.get('headers', {})
|
|
||||||
headers[self.AVI_SLUG] = force_uuid
|
|
||||||
kwargs['headers'] = headers
|
|
||||||
return self._api('post', path, tenant, tenant_uuid, data=data,
|
|
||||||
timeout=timeout, params=params,
|
|
||||||
api_version=api_version, **kwargs)
|
|
||||||
|
|
||||||
def put(self, path, data=None, tenant='', tenant_uuid='',
|
|
||||||
timeout=None, params=None, api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
It extends the Session Library interface to add AVI API prefixes,
|
|
||||||
handle session exceptions related to authentication and update
|
|
||||||
the global user session cache.
|
|
||||||
:param path: takes relative path to the AVI api. It is modified by
|
|
||||||
the library to conform to AVI Controller's REST API interface
|
|
||||||
:param data: dictionary of the data. Support for json string
|
|
||||||
is deprecated
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
returns session's response object
|
|
||||||
"""
|
|
||||||
return self._api('put', path, tenant, tenant_uuid, data=data,
|
|
||||||
timeout=timeout, params=params,
|
|
||||||
api_version=api_version, **kwargs)
|
|
||||||
|
|
||||||
def patch(self, path, data=None, tenant='', tenant_uuid='',
|
|
||||||
timeout=None, params=None, api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
It extends the Session Library interface to add AVI API prefixes,
|
|
||||||
handle session exceptions related to authentication and update
|
|
||||||
the global user session cache.
|
|
||||||
:param path: takes relative path to the AVI api. It is modified by
|
|
||||||
the library to conform to AVI Controller's REST API interface
|
|
||||||
:param data: dictionary of the data. Support for json string
|
|
||||||
is deprecated
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
returns session's response object
|
|
||||||
"""
|
|
||||||
return self._api('patch', path, tenant, tenant_uuid, data=data,
|
|
||||||
timeout=timeout, params=params,
|
|
||||||
api_version=api_version, **kwargs)
|
|
||||||
|
|
||||||
def put_by_name(self, path, name, data=None, tenant='',
|
|
||||||
tenant_uuid='', timeout=None, params=None,
|
|
||||||
api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
Helper function to perform HTTP PUT on Avi REST Objects using object
|
|
||||||
type and name.
|
|
||||||
Internally, it transforms the request to api/path?name=<name>...
|
|
||||||
:param path: relative path to service
|
|
||||||
:param name: name of the object
|
|
||||||
:param data: dictionary of the data. Support for json string
|
|
||||||
is deprecated
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
returns session's response object
|
|
||||||
"""
|
|
||||||
uuid = self._get_uuid_by_name(
|
|
||||||
path, name, tenant, tenant_uuid, api_version=api_version)
|
|
||||||
path = '%s/%s' % (path, uuid)
|
|
||||||
return self.put(path, data, tenant, tenant_uuid, timeout=timeout,
|
|
||||||
params=params, api_version=api_version, **kwargs)
|
|
||||||
|
|
||||||
def delete(self, path, tenant='', tenant_uuid='', timeout=None, params=None,
|
|
||||||
data=None, api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
It extends the Session Library interface to add AVI API prefixes,
|
|
||||||
handle session exceptions related to authentication and update
|
|
||||||
the global user session cache.
|
|
||||||
:param path: takes relative path to the AVI api. It is modified by
|
|
||||||
the library to conform to AVI Controller's REST API interface
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param data: dictionary of the data. Support for json string
|
|
||||||
is deprecated
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
returns session's response object
|
|
||||||
"""
|
|
||||||
return self._api('delete', path, tenant, tenant_uuid, data=data,
|
|
||||||
timeout=timeout, params=params,
|
|
||||||
api_version=api_version, **kwargs)
|
|
||||||
|
|
||||||
def delete_by_name(self, path, name, tenant='', tenant_uuid='',
|
|
||||||
timeout=None, params=None, api_version=None, **kwargs):
|
|
||||||
"""
|
|
||||||
Helper function to perform HTTP DELETE on Avi REST Objects using object
|
|
||||||
type and name. Internally, it transforms the request to
|
|
||||||
api/path?name=<name>...
|
|
||||||
:param path: relative path to service
|
|
||||||
:param name: name of the object
|
|
||||||
:param tenant: overrides the tenant used during session creation
|
|
||||||
:param tenant_uuid: overrides the tenant or tenant_uuid during session
|
|
||||||
creation
|
|
||||||
:param timeout: timeout for API calls; Default value is 60 seconds
|
|
||||||
:param params: dictionary of key value pairs to be sent as query
|
|
||||||
parameters
|
|
||||||
:param api_version: overrides x-avi-header in request header during
|
|
||||||
session creation
|
|
||||||
returns session's response object
|
|
||||||
"""
|
|
||||||
uuid = self._get_uuid_by_name(path, name, tenant, tenant_uuid,
|
|
||||||
api_version=api_version)
|
|
||||||
if not uuid:
|
|
||||||
raise ObjectNotFound("%s/?name=%s" % (path, name))
|
|
||||||
path = '%s/%s' % (path, uuid)
|
|
||||||
return self.delete(path, tenant, tenant_uuid, timeout=timeout,
|
|
||||||
params=params, api_version=api_version, **kwargs)
|
|
||||||
|
|
||||||
def get_obj_ref(self, obj):
|
|
||||||
"""returns reference url from dict object"""
|
|
||||||
if not obj:
|
|
||||||
return None
|
|
||||||
if isinstance(obj, Response):
|
|
||||||
obj = json.loads(obj.text)
|
|
||||||
if obj.get(0, None):
|
|
||||||
return obj[0]['url']
|
|
||||||
elif obj.get('url', None):
|
|
||||||
return obj['url']
|
|
||||||
elif obj.get('results', None):
|
|
||||||
return obj['results'][0]['url']
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_obj_uuid(self, obj):
|
|
||||||
"""returns uuid from dict object"""
|
|
||||||
if not obj:
|
|
||||||
raise ObjectNotFound('Object %s Not found' % (obj))
|
|
||||||
if isinstance(obj, Response):
|
|
||||||
obj = json.loads(obj.text)
|
|
||||||
if obj.get(0, None):
|
|
||||||
return obj[0]['uuid']
|
|
||||||
elif obj.get('uuid', None):
|
|
||||||
return obj['uuid']
|
|
||||||
elif obj.get('results', None):
|
|
||||||
return obj['results'][0]['uuid']
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _get_api_path(self, path, uuid=None):
|
|
||||||
"""
|
|
||||||
This function returns the full url from relative path and uuid.
|
|
||||||
"""
|
|
||||||
if path == 'logout':
|
|
||||||
return self.prefix + '/' + path
|
|
||||||
elif uuid:
|
|
||||||
return self.prefix + '/api/' + path + '/' + uuid
|
|
||||||
else:
|
|
||||||
return self.prefix + '/api/' + path
|
|
||||||
|
|
||||||
def _get_uuid_by_name(self, path, name, tenant='admin',
|
|
||||||
tenant_uuid='', api_version=None):
|
|
||||||
"""gets object by name and service path and returns uuid"""
|
|
||||||
resp = self.get_object_by_name(
|
|
||||||
path, name, tenant, tenant_uuid, api_version=api_version)
|
|
||||||
if not resp:
|
|
||||||
raise ObjectNotFound("%s/%s" % (path, name))
|
|
||||||
return self.get_obj_uuid(resp)
|
|
||||||
|
|
||||||
def _update_session_last_used(self):
|
|
||||||
if self.key in sessionDict:
|
|
||||||
sessionDict[self.key]["last_used"] = datetime.utcnow()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _clean_inactive_sessions():
|
|
||||||
"""Removes sessions which are inactive more than 20 min"""
|
|
||||||
session_cache = sessionDict
|
|
||||||
logger.debug("cleaning inactive sessions in pid %d num elem %d",
|
|
||||||
os.getpid(), len(session_cache))
|
|
||||||
keys_to_delete = []
|
|
||||||
for key, session in list(session_cache.items()):
|
|
||||||
tdiff = avi_timedelta(datetime.utcnow() - session["last_used"])
|
|
||||||
if tdiff < ApiSession.SESSION_CACHE_EXPIRY:
|
|
||||||
continue
|
|
||||||
keys_to_delete.append(key)
|
|
||||||
for key in keys_to_delete:
|
|
||||||
del session_cache[key]
|
|
||||||
logger.debug("Removed session for : %s", key)
|
|
||||||
|
|
||||||
def delete_session(self):
|
|
||||||
""" Removes the session for cleanup"""
|
|
||||||
logger.debug("Removed session for : %s", self.key)
|
|
||||||
sessionDict.pop(self.key, None)
|
|
||||||
return
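# A minimal end-to-end sketch, not part of the original file. The controller
# address, credentials and object names are placeholders; it assumes the
# controller is reachable and the login succeeds.
def _example_api_session_usage():
    api = ApiSession.get_session('10.10.25.42', 'admin', 'password',
                                 tenant='admin', api_version='18.2.9')
    # list a collection; the returned object behaves like a requests response
    pools = api.get('pool').json()
    # create from a plain dict, then look the new object up and delete it
    api.post('pool', data={'name': 'example-pool'})
    created = api.get_object_by_name('pool', 'example-pool')
    api.delete_by_name('pool', 'example-pool')
    api.delete_session()
    return pools, created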
|
|
||||||
# End of file
|
|
|
@@ -1,91 +0,0 @@
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# (c) 2016, Ted Elhourani <ted@bigswitch.com>
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
import json
|
|
||||||
|
|
||||||
from ansible.module_utils.urls import fetch_url
|
|
||||||
|
|
||||||
|
|
||||||
class Response(object):
|
|
||||||
|
|
||||||
def __init__(self, resp, info):
|
|
||||||
self.body = None
|
|
||||||
if resp:
|
|
||||||
self.body = resp.read()
|
|
||||||
self.info = info
|
|
||||||
|
|
||||||
@property
|
|
||||||
def json(self):
|
|
||||||
if not self.body:
|
|
||||||
if "body" in self.info:
|
|
||||||
return json.loads(self.info["body"])
|
|
||||||
return None
|
|
||||||
try:
|
|
||||||
return json.loads(self.body)
|
|
||||||
except ValueError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def status_code(self):
|
|
||||||
return self.info["status"]
|
|
||||||
|
|
||||||
|
|
||||||
class Rest(object):
|
|
||||||
|
|
||||||
def __init__(self, module, headers, baseurl):
|
|
||||||
self.module = module
|
|
||||||
self.headers = headers
|
|
||||||
self.baseurl = baseurl
|
|
||||||
|
|
||||||
def _url_builder(self, path):
|
|
||||||
if path[0] == '/':
|
|
||||||
path = path[1:]
|
|
||||||
return '%s/%s' % (self.baseurl, path)
|
|
||||||
|
|
||||||
def send(self, method, path, data=None, headers=None):
|
|
||||||
url = self._url_builder(path)
|
|
||||||
data = self.module.jsonify(data)
|
|
||||||
|
|
||||||
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
|
|
||||||
|
|
||||||
return Response(resp, info)
|
|
||||||
|
|
||||||
def get(self, path, data=None, headers=None):
|
|
||||||
return self.send('GET', path, data, headers)
|
|
||||||
|
|
||||||
def put(self, path, data=None, headers=None):
|
|
||||||
return self.send('PUT', path, data, headers)
|
|
||||||
|
|
||||||
def post(self, path, data=None, headers=None):
|
|
||||||
return self.send('POST', path, data, headers)
|
|
||||||
|
|
||||||
def patch(self, path, data=None, headers=None):
|
|
||||||
return self.send('PATCH', path, data, headers)
|
|
||||||
|
|
||||||
def delete(self, path, data=None, headers=None):
|
|
||||||
return self.send('DELETE', path, data, headers)
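# A hedged sketch of how a module might drive the Rest helper above. The base
# URL, headers and path are placeholders; `module` is assumed to be an
# AnsibleModule instance, which fetch_url() needs for its url_* options.
def _example_rest_usage(module):
    rest = Rest(module,
                headers={'Content-Type': 'application/json'},
                baseurl='https://controller.example.com:8443/api/v1/data')
    response = rest.get('info/summary/fabric')
    if response.status_code != 200:
        module.fail_json(msg='GET failed: %s' % response.info)
    # Response.json is a property, not a method, in this helper
    return response.json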
|
|
|
@@ -1,421 +0,0 @@
|
||||||
#
|
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
#
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
|
||||||
# still belong to the author of the module, and may assign their own license
|
|
||||||
# to the complete work.
|
|
||||||
#
|
|
||||||
# (c) 2017 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
# are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
||||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
|
||||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
#
|
|
||||||
|
|
||||||
import re
|
|
||||||
import socket
|
|
||||||
import sys
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
from ansible.module_utils.basic import env_fallback
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
|
|
||||||
from ansible.module_utils.connection import exec_command, ConnectionError
|
|
||||||
from ansible.module_utils.six import iteritems
|
|
||||||
from ansible.module_utils._text import to_native
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import NetconfConnection
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
from ncclient.xml_ import to_xml, new_ele_ns
|
|
||||||
HAS_NCCLIENT = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_NCCLIENT = False
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
from lxml import etree
|
|
||||||
except ImportError:
|
|
||||||
from xml.etree import ElementTree as etree
|
|
||||||
|
|
||||||
_DEVICE_CLI_CONNECTION = None
|
|
||||||
_DEVICE_NC_CONNECTION = None
|
|
||||||
|
|
||||||
ce_provider_spec = {
|
|
||||||
'host': dict(),
|
|
||||||
'port': dict(type='int'),
|
|
||||||
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
|
|
||||||
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
|
|
||||||
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
|
|
||||||
'use_ssl': dict(type='bool'),
|
|
||||||
'validate_certs': dict(type='bool'),
|
|
||||||
'timeout': dict(type='int'),
|
|
||||||
'transport': dict(default='cli', choices=['cli', 'netconf']),
|
|
||||||
}
|
|
||||||
ce_argument_spec = {
|
|
||||||
'provider': dict(type='dict', options=ce_provider_spec),
|
|
||||||
}
|
|
||||||
ce_top_spec = {
|
|
||||||
'host': dict(removed_in_version=2.9),
|
|
||||||
'port': dict(removed_in_version=2.9, type='int'),
|
|
||||||
'username': dict(removed_in_version=2.9),
|
|
||||||
'password': dict(removed_in_version=2.9, no_log=True),
|
|
||||||
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
|
|
||||||
'use_ssl': dict(removed_in_version=2.9, type='bool'),
|
|
||||||
'validate_certs': dict(removed_in_version=2.9, type='bool'),
|
|
||||||
'timeout': dict(removed_in_version=2.9, type='int'),
|
|
||||||
'transport': dict(removed_in_version=2.9, choices=['cli', 'netconf']),
|
|
||||||
}
|
|
||||||
ce_argument_spec.update(ce_top_spec)
|
|
||||||
|
|
||||||
|
|
||||||
def to_string(data):
|
|
||||||
return re.sub(r'<data\s+.+?(/>|>)', r'<data\1', data)
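# Illustrative only: to_string() strips the attributes (typically namespace
# declarations) from the <data ...> element of an rpc-reply string.
def _example_to_string():
    reply = '<data xmlns="urn:huawei:yang:huawei-ifm"><ifm/></data>'
    return to_string(reply)  # -> '<data><ifm/></data>'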
|
|
||||||
|
|
||||||
|
|
||||||
def check_args(module, warnings):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def load_params(module):
|
|
||||||
"""load_params"""
|
|
||||||
provider = module.params.get('provider') or dict()
|
|
||||||
for key, value in iteritems(provider):
|
|
||||||
if key in ce_argument_spec:
|
|
||||||
if module.params.get(key) is None and value is not None:
|
|
||||||
module.params[key] = value
|
|
||||||
|
|
||||||
|
|
||||||
def get_connection(module):
|
|
||||||
"""get_connection"""
|
|
||||||
global _DEVICE_CLI_CONNECTION
|
|
||||||
if not _DEVICE_CLI_CONNECTION:
|
|
||||||
load_params(module)
|
|
||||||
conn = Cli(module)
|
|
||||||
_DEVICE_CLI_CONNECTION = conn
|
|
||||||
return _DEVICE_CLI_CONNECTION
|
|
||||||
|
|
||||||
|
|
||||||
def rm_config_prefix(cfg):
|
|
||||||
if not cfg:
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
cmds = cfg.split("\n")
|
|
||||||
for i in range(len(cmds)):
|
|
||||||
if not cmds[i]:
|
|
||||||
continue
|
|
||||||
if '~' in cmds[i]:
|
|
||||||
index = cmds[i].index('~')
|
|
||||||
if cmds[i][:index] == ' ' * index:
|
|
||||||
cmds[i] = cmds[i].replace("~", "", 1)
|
|
||||||
return '\n'.join(cmds)
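# Illustrative only: rm_config_prefix() drops the leading '~' marker that the
# device places on default configuration lines; the snippet below is made up.
def _example_rm_config_prefix():
    cfg = '#\n~ info-center enable\ninterface GE1/0/1\n ~ shutdown'
    return rm_config_prefix(cfg)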
|
|
||||||
|
|
||||||
|
|
||||||
class Cli:
|
|
||||||
|
|
||||||
def __init__(self, module):
|
|
||||||
self._module = module
|
|
||||||
self._device_configs = {}
|
|
||||||
|
|
||||||
def exec_command(self, command):
|
|
||||||
if isinstance(command, dict):
|
|
||||||
command = self._module.jsonify(command)
|
|
||||||
|
|
||||||
return exec_command(self._module, command)
|
|
||||||
|
|
||||||
def get_config(self, flags=None):
|
|
||||||
"""Retrieves the current config from the device or cache
|
|
||||||
"""
|
|
||||||
flags = [] if flags is None else flags
|
|
||||||
|
|
||||||
cmd = 'display current-configuration '
|
|
||||||
cmd += ' '.join(flags)
|
|
||||||
cmd = cmd.strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
return self._device_configs[cmd]
|
|
||||||
except KeyError:
|
|
||||||
rc, out, err = self.exec_command(cmd)
|
|
||||||
if rc != 0:
|
|
||||||
self._module.fail_json(msg=err)
|
|
||||||
cfg = str(out).strip()
|
|
||||||
# remove default configuration prefix '~'
|
|
||||||
for flag in flags:
|
|
||||||
if "include-default" in flag:
|
|
||||||
cfg = rm_config_prefix(cfg)
|
|
||||||
break
|
|
||||||
|
|
||||||
self._device_configs[cmd] = cfg
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
def run_commands(self, commands, check_rc=True):
|
|
||||||
"""Run list of commands on remote device and return results
|
|
||||||
"""
|
|
||||||
responses = list()
|
|
||||||
|
|
||||||
for item in to_list(commands):
|
|
||||||
|
|
||||||
rc, out, err = self.exec_command(item)
|
|
||||||
|
|
||||||
if check_rc and rc != 0:
|
|
||||||
self._module.fail_json(msg=cli_err_msg(item['command'].strip(), err))
|
|
||||||
|
|
||||||
try:
|
|
||||||
out = self._module.from_json(out)
|
|
||||||
except ValueError:
|
|
||||||
out = str(out).strip()
|
|
||||||
|
|
||||||
responses.append(out)
|
|
||||||
return responses
|
|
||||||
|
|
||||||
def load_config(self, config):
|
|
||||||
"""Sends configuration commands to the remote device
|
|
||||||
"""
|
|
||||||
rc, out, err = self.exec_command('mmi-mode enable')
|
|
||||||
if rc != 0:
|
|
||||||
self._module.fail_json(msg='unable to set mmi-mode enable', output=err)
|
|
||||||
rc, out, err = self.exec_command('system-view immediately')
|
|
||||||
if rc != 0:
|
|
||||||
self._module.fail_json(msg='unable to enter system-view', output=err)
|
|
||||||
|
|
||||||
for cmd in config:
|
|
||||||
rc, out, err = self.exec_command(cmd)
|
|
||||||
if rc != 0:
|
|
||||||
self._module.fail_json(msg=cli_err_msg(cmd.strip(), err))
|
|
||||||
|
|
||||||
self.exec_command('return')
|
|
||||||
|
|
||||||
|
|
||||||
def cli_err_msg(cmd, err):
|
|
||||||
""" get cli exception message"""
|
|
||||||
|
|
||||||
if not err:
|
|
||||||
return "Error: Fail to get cli exception message."
|
|
||||||
|
|
||||||
msg = list()
|
|
||||||
err_list = str(err).split("\r\n")
|
|
||||||
for err in err_list:
|
|
||||||
err = err.strip('.,\r\n\t ')
|
|
||||||
if not err:
|
|
||||||
continue
|
|
||||||
if cmd and cmd == err:
|
|
||||||
continue
|
|
||||||
if " at '^' position" in err:
|
|
||||||
err = err.replace(" at '^' position", "").strip()
|
|
||||||
err = err.strip('.,\r\n\t ')
|
|
||||||
if err == "^":
|
|
||||||
continue
|
|
||||||
if len(err) > 2 and err[0] in ["<", "["] and err[-1] in [">", "]"]:
|
|
||||||
continue
|
|
||||||
err = err.strip('.,\r\n\t ')
|
|
||||||
if err:
|
|
||||||
msg.append(err)
|
|
||||||
|
|
||||||
if cmd:
|
|
||||||
msg.insert(0, "Command: %s" % cmd)
|
|
||||||
|
|
||||||
return ", ".join(msg).capitalize() + "."
|
|
||||||
|
|
||||||
|
|
||||||
def to_command(module, commands):
|
|
||||||
default_output = 'text'
|
|
||||||
transform = ComplexList(dict(
|
|
||||||
command=dict(key=True),
|
|
||||||
output=dict(default=default_output),
|
|
||||||
prompt=dict(),
|
|
||||||
answer=dict()
|
|
||||||
), module)
|
|
||||||
|
|
||||||
commands = transform(to_list(commands))
|
|
||||||
|
|
||||||
return commands
|
|
||||||
|
|
||||||
|
|
||||||
def get_config(module, flags=None):
|
|
||||||
flags = [] if flags is None else flags
|
|
||||||
|
|
||||||
conn = get_connection(module)
|
|
||||||
return conn.get_config(flags)
|
|
||||||
|
|
||||||
|
|
||||||
def run_commands(module, commands, check_rc=True):
|
|
||||||
conn = get_connection(module)
|
|
||||||
return conn.run_commands(to_command(module, commands), check_rc)
|
|
||||||
|
|
||||||
|
|
||||||
def load_config(module, config):
|
|
||||||
"""load_config"""
|
|
||||||
conn = get_connection(module)
|
|
||||||
return conn.load_config(config)
|
|
||||||
|
|
||||||
|
|
||||||
def ce_unknown_host_cb(host, fingerprint):
|
|
||||||
""" ce_unknown_host_cb """
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def get_nc_set_id(xml_str):
|
|
||||||
"""get netconf set-id value"""
|
|
||||||
|
|
||||||
result = re.findall(r'<rpc-reply.+?set-id=\"(\d+)\"', xml_str)
|
|
||||||
if not result:
|
|
||||||
return None
|
|
||||||
return result[0]
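# Illustrative only: get_nc_set_id() pulls the pagination set-id out of an
# <rpc-reply> header so get_nc_next() can request the following chunk.
def _example_get_nc_set_id():
    reply = '<rpc-reply message-id="101" set-id="42"><data/></rpc-reply>'
    return get_nc_set_id(reply)  # -> '42'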
|
|
||||||
|
|
||||||
|
|
||||||
def get_xml_line(xml_list, index):
|
|
||||||
"""get xml specified line valid string data"""
|
|
||||||
|
|
||||||
ele = None
|
|
||||||
while xml_list and not ele:
|
|
||||||
if index >= 0 and index >= len(xml_list):
|
|
||||||
return None
|
|
||||||
if index < 0 and abs(index) > len(xml_list):
|
|
||||||
return None
|
|
||||||
|
|
||||||
ele = xml_list[index]
|
|
||||||
if not ele.replace(" ", ""):
|
|
||||||
xml_list.pop(index)
|
|
||||||
ele = None
|
|
||||||
return ele
|
|
||||||
|
|
||||||
|
|
||||||
def merge_nc_xml(xml1, xml2):
|
|
||||||
"""merge xml1 and xml2"""
|
|
||||||
|
|
||||||
xml1_list = xml1.split("</data>")[0].split("\n")
|
|
||||||
xml2_list = xml2.split("<data>")[1].split("\n")
|
|
||||||
|
|
||||||
while True:
|
|
||||||
xml1_ele1 = get_xml_line(xml1_list, -1)
|
|
||||||
xml1_ele2 = get_xml_line(xml1_list, -2)
|
|
||||||
xml2_ele1 = get_xml_line(xml2_list, 0)
|
|
||||||
xml2_ele2 = get_xml_line(xml2_list, 1)
|
|
||||||
if not xml1_ele1 or not xml1_ele2 or not xml2_ele1 or not xml2_ele2:
|
|
||||||
return xml1
|
|
||||||
|
|
||||||
if "xmlns" in xml2_ele1:
|
|
||||||
xml2_ele1 = xml2_ele1.lstrip().split(" ")[0] + ">"
|
|
||||||
if "xmlns" in xml2_ele2:
|
|
||||||
xml2_ele2 = xml2_ele2.lstrip().split(" ")[0] + ">"
|
|
||||||
if xml1_ele1.replace(" ", "").replace("/", "") == xml2_ele1.replace(" ", "").replace("/", ""):
|
|
||||||
if xml1_ele2.replace(" ", "").replace("/", "") == xml2_ele2.replace(" ", "").replace("/", ""):
|
|
||||||
xml1_list.pop()
|
|
||||||
xml2_list.pop(0)
|
|
||||||
else:
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
break
|
|
||||||
|
|
||||||
return "\n".join(xml1_list + xml2_list)
|
|
||||||
|
|
||||||
|
|
||||||
def get_nc_connection(module):
|
|
||||||
global _DEVICE_NC_CONNECTION
|
|
||||||
if not _DEVICE_NC_CONNECTION:
|
|
||||||
load_params(module)
|
|
||||||
conn = NetconfConnection(module._socket_path)
|
|
||||||
_DEVICE_NC_CONNECTION = conn
|
|
||||||
return _DEVICE_NC_CONNECTION
|
|
||||||
|
|
||||||
|
|
||||||
def set_nc_config(module, xml_str):
|
|
||||||
""" set_config """
|
|
||||||
|
|
||||||
conn = get_nc_connection(module)
|
|
||||||
try:
|
|
||||||
out = conn.edit_config(target='running', config=xml_str, default_operation='merge',
|
|
||||||
error_option='rollback-on-error')
|
|
||||||
finally:
|
|
||||||
# conn.unlock(target = 'candidate')
|
|
||||||
pass
|
|
||||||
return to_string(to_xml(out))
|
|
||||||
|
|
||||||
|
|
||||||
def get_nc_next(module, xml_str):
|
|
||||||
""" get_nc_next for exchange capability """
|
|
||||||
|
|
||||||
conn = get_nc_connection(module)
|
|
||||||
result = None
|
|
||||||
if xml_str is not None:
|
|
||||||
response = conn.get(xml_str, if_rpc_reply=True)
|
|
||||||
result = response.find('./*')
|
|
||||||
set_id = response.get('set-id')
|
|
||||||
while set_id is not None:
|
|
||||||
try:
|
|
||||||
fetch_node = new_ele_ns('get-next', 'http://www.huawei.com/netconf/capability/base/1.0', {'set-id': set_id})
|
|
||||||
next_xml = conn.dispatch_rpc(etree.tostring(fetch_node))
|
|
||||||
if next_xml is not None:
|
|
||||||
result.extend(next_xml.find('./*'))
|
|
||||||
set_id = next_xml.get('set-id')
|
|
||||||
except ConnectionError:
|
|
||||||
break
|
|
||||||
if result is not None:
|
|
||||||
return etree.tostring(result)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def get_nc_config(module, xml_str):
|
|
||||||
""" get_config """
|
|
||||||
|
|
||||||
conn = get_nc_connection(module)
|
|
||||||
if xml_str is not None:
|
|
||||||
response = conn.get(xml_str)
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return to_string(to_xml(response))
|
|
||||||
|
|
||||||
|
|
||||||
def execute_nc_action(module, xml_str):
|
|
||||||
""" huawei execute-action """
|
|
||||||
|
|
||||||
conn = get_nc_connection(module)
|
|
||||||
response = conn.execute_action(xml_str)
|
|
||||||
return to_string(to_xml(response))
|
|
||||||
|
|
||||||
|
|
||||||
def execute_nc_cli(module, xml_str):
|
|
||||||
""" huawei execute-cli """
|
|
||||||
|
|
||||||
if xml_str is not None:
|
|
||||||
try:
|
|
||||||
conn = get_nc_connection(module)
|
|
||||||
out = conn.execute_nc_cli(command=xml_str)
|
|
||||||
return to_string(to_xml(out))
|
|
||||||
except Exception as exc:
|
|
||||||
raise Exception(exc)
|
|
||||||
|
|
||||||
|
|
||||||
def check_ip_addr(ipaddr):
|
|
||||||
""" check ip address, Supports IPv4 and IPv6 """
|
|
||||||
|
|
||||||
if not ipaddr or '\x00' in ipaddr:
|
|
||||||
return False
|
|
||||||
|
|
||||||
try:
|
|
||||||
res = socket.getaddrinfo(ipaddr, 0, socket.AF_UNSPEC,
|
|
||||||
socket.SOCK_STREAM,
|
|
||||||
0, socket.AI_NUMERICHOST)
|
|
||||||
return bool(res)
|
|
||||||
except socket.gaierror:
|
|
||||||
err = sys.exc_info()[1]
|
|
||||||
if err.args[0] == socket.EAI_NONAME:
|
|
||||||
return False
|
|
||||||
raise
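# Illustrative only: check_ip_addr() accepts literal IPv4/IPv6 addresses and
# rejects hostnames, because AI_NUMERICHOST forbids name resolution.
def _example_check_ip_addr():
    assert check_ip_addr('192.0.2.1')
    assert check_ip_addr('2001:db8::1')
    assert not check_ip_addr('switch01.example.com')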
|
|
|
@@ -1,660 +0,0 @@
|
||||||
# This code is part of Ansible, but is an independent component.
|
|
||||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
|
||||||
# Modules you write using this snippet, which is embedded dynamically by
|
|
||||||
# Ansible still belong to the author of the module, and may assign their own
|
|
||||||
# license to the complete work.
|
|
||||||
#
|
|
||||||
# Copyright (C) 2017 Lenovo, Inc.
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# Redistribution and use in source and binary forms, with or without
|
|
||||||
# modification, are permitted provided that the following conditions are met:
|
|
||||||
#
|
|
||||||
# * Redistributions of source code must retain the above copyright
|
|
||||||
# notice, this list of conditions and the following disclaimer.
|
|
||||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
# this list of conditions and the following disclaimer in the documentation
|
|
||||||
# and/or other materials provided with the distribution.
|
|
||||||
#
|
|
||||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
||||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
|
||||||
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
||||||
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
||||||
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
||||||
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
||||||
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
# POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
#
|
|
||||||
# Contains utility methods
|
|
||||||
# Lenovo Networking
|
|
||||||
|
|
||||||
import time
|
|
||||||
import socket
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
try:
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos_errorcodes
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos_devicerules
|
|
||||||
HAS_LIB = True
|
|
||||||
except Exception:
|
|
||||||
HAS_LIB = False
|
|
||||||
from distutils.cmd import Command
|
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
from ansible.module_utils.basic import env_fallback
|
|
||||||
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection
|
|
||||||
from ansible.module_utils.connection import Connection, exec_command
|
|
||||||
from ansible.module_utils.connection import ConnectionError
|
|
||||||
|
|
||||||
_DEVICE_CONFIGS = {}
|
|
||||||
_CONNECTION = None
|
|
||||||
_VALID_USER_ROLES = ['network-admin', 'network-operator']
|
|
||||||
|
|
||||||
cnos_provider_spec = {
|
|
||||||
'host': dict(),
|
|
||||||
'port': dict(type='int'),
|
|
||||||
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
|
|
||||||
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']),
|
|
||||||
no_log=True),
|
|
||||||
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']),
|
|
||||||
type='path'),
|
|
||||||
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']),
|
|
||||||
type='bool'),
|
|
||||||
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']),
|
|
||||||
no_log=True),
|
|
||||||
'timeout': dict(type='int'),
|
|
||||||
'context': dict(),
|
|
||||||
'passwords': dict()
|
|
||||||
}
|
|
||||||
|
|
||||||
cnos_argument_spec = {
|
|
||||||
'provider': dict(type='dict', options=cnos_provider_spec),
|
|
||||||
}
|
|
||||||
|
|
||||||
command_spec = {
|
|
||||||
'command': dict(key=True),
|
|
||||||
'prompt': dict(),
|
|
||||||
'answer': dict(),
|
|
||||||
'check_all': dict()
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_provider_argspec():
|
|
||||||
return cnos_provider_spec
|
|
||||||
|
|
||||||
|
|
||||||
def check_args(module, warnings):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_user_roles():
|
|
||||||
return _VALID_USER_ROLES
|
|
||||||
|
|
||||||
|
|
||||||
def get_connection(module):
|
|
||||||
global _CONNECTION
|
|
||||||
if _CONNECTION:
|
|
||||||
return _CONNECTION
|
|
||||||
_CONNECTION = Connection(module._socket_path)
|
|
||||||
|
|
||||||
context = None
|
|
||||||
try:
|
|
||||||
context = module.params['context']
|
|
||||||
except KeyError:
|
|
||||||
context = None
|
|
||||||
|
|
||||||
if context:
|
|
||||||
if context == 'system':
|
|
||||||
command = 'changeto system'
|
|
||||||
else:
|
|
||||||
command = 'changeto context %s' % context
|
|
||||||
_CONNECTION.get(command)
|
|
||||||
|
|
||||||
return _CONNECTION
|
|
||||||
|
|
||||||
|
|
||||||
def get_config(module, flags=None):
|
|
||||||
flags = [] if flags is None else flags
|
|
||||||
|
|
||||||
passwords = None
|
|
||||||
try:
|
|
||||||
passwords = module.params['passwords']
|
|
||||||
except KeyError:
|
|
||||||
passwords = None
|
|
||||||
if passwords:
|
|
||||||
cmd = 'more system:running-config'
|
|
||||||
else:
|
|
||||||
cmd = 'display running-config '
|
|
||||||
cmd += ' '.join(flags)
|
|
||||||
cmd = cmd.strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
return _DEVICE_CONFIGS[cmd]
|
|
||||||
except KeyError:
|
|
||||||
conn = get_connection(module)
|
|
||||||
out = conn.get(cmd)
|
|
||||||
cfg = to_text(out, errors='surrogate_then_replace').strip()
|
|
||||||
_DEVICE_CONFIGS[cmd] = cfg
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
|
|
||||||
def to_commands(module, commands):
|
|
||||||
if not isinstance(commands, list):
|
|
||||||
raise AssertionError('argument must be of type <list>')
|
|
||||||
|
|
||||||
transform = EntityCollection(module, command_spec)
|
|
||||||
commands = transform(commands)
|
|
||||||
|
|
||||||
for index, item in enumerate(commands):
|
|
||||||
if module.check_mode and not item['command'].startswith('show'):
|
|
||||||
module.warn('only show commands are supported when using check '
|
|
||||||
'mode, not executing `%s`' % item['command'])
|
|
||||||
|
|
||||||
return commands
|
|
||||||
|
|
||||||
|
|
||||||
def run_commands(module, commands, check_rc=True):
|
|
||||||
connection = get_connection(module)
|
|
||||||
connection.get('enable')
|
|
||||||
commands = to_commands(module, to_list(commands))
|
|
||||||
|
|
||||||
responses = list()
|
|
||||||
|
|
||||||
for cmd in commands:
|
|
||||||
out = connection.get(**cmd)
|
|
||||||
responses.append(to_text(out, errors='surrogate_then_replace'))
|
|
||||||
|
|
||||||
return responses
|
|
||||||
|
|
||||||
|
|
||||||
def run_cnos_commands(module, commands, check_rc=True):
|
|
||||||
retVal = ''
|
|
||||||
enter_config = {'command': 'configure terminal', 'prompt': None,
|
|
||||||
'answer': None}
|
|
||||||
exit_config = {'command': 'end', 'prompt': None, 'answer': None}
|
|
||||||
commands.insert(0, enter_config)
|
|
||||||
commands.append(exit_config)
|
|
||||||
for cmd in commands:
|
|
||||||
retVal = retVal + '>> ' + cmd['command'] + '\n'
|
|
||||||
try:
|
|
||||||
responses = run_commands(module, commands, check_rc)
|
|
||||||
for response in responses:
|
|
||||||
retVal = retVal + '<< ' + response + '\n'
|
|
||||||
except Exception as e:
|
|
||||||
errMsg = ''
|
|
||||||
if hasattr(e, 'message'):
|
|
||||||
errMsg = e.message
|
|
||||||
else:
|
|
||||||
errMsg = str(e)
|
|
||||||
# Exception in Exceptions
|
|
||||||
if 'VLAN_ACCESS_MAP' in errMsg:
|
|
||||||
return retVal + '<<' + errMsg + '\n'
|
|
||||||
if 'confederation identifier' in errMsg:
|
|
||||||
return retVal + '<<' + errMsg + '\n'
|
|
||||||
# Add more here if required
|
|
||||||
retVal = retVal + '<< ' + 'Error-101 ' + errMsg + '\n'
|
|
||||||
return str(retVal)
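# A hedged sketch of the command-dictionary shape run_cnos_commands() expects;
# `module` is assumed to be an AnsibleModule connected over network_cli (it
# supplies _socket_path), and the VLAN values are placeholders.
def _example_run_cnos_commands(module):
    cmds = [{'command': 'vlan 13', 'prompt': None, 'answer': None},
            {'command': 'name red-vlan', 'prompt': None, 'answer': None}]
    output = run_cnos_commands(module, cmds)
    # checkOutputForError() returns None when the transcript looks clean
    return checkOutputForError(output)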


def get_capabilities(module):
    if hasattr(module, '_cnos_capabilities'):
        return module._cnos_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module._cnos_capabilities = json.loads(capabilities)
    return module._cnos_capabilities


def load_config(module, config):
    try:
        conn = get_connection(module)
        conn.get('enable')
        resp = conn.edit_config(config)
        return resp.get('response')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def get_defaults_flag(module):
    rc, out, err = exec_command(module, 'display running-config ?')
    out = to_text(out, errors='surrogate_then_replace')

    commands = set()
    for line in out.splitlines():
        if line:
            commands.add(line.strip().split()[0])

    if 'all' in commands:
        return 'all'
    else:
        return 'full'


def enterEnableModeForDevice(enablePassword, timeout, obj):
    command = "enable\n"
    pwdPrompt = "password:"
    # debugOutput(enablePassword)
    # debugOutput('\n')
    obj.settimeout(int(timeout))
    # Executing enable
    obj.send(command)
    flag = False
    retVal = ""
    count = 5
    while not flag:
        # If wait time is exceeded.
        if(count == 0):
            flag = True
        else:
            count = count - 1
        # A delay of one second
        time.sleep(1)
        try:
            buffByte = obj.recv(9999)
            buff = buffByte.decode()
            retVal = retVal + buff
            # debugOutput(buff)
            gotit = buff.find(pwdPrompt)
            if(gotit != -1):
                time.sleep(1)
                if(enablePassword is None or enablePassword == ""):
                    return "\n Error-106"
                obj.send(enablePassword)
                obj.send("\r")
                obj.send("\n")
                time.sleep(1)
                innerBuffByte = obj.recv(9999)
                innerBuff = innerBuffByte.decode()
                retVal = retVal + innerBuff
                # debugOutput(innerBuff)
                innerGotit = innerBuff.find("#")
                if(innerGotit != -1):
                    return retVal
            else:
                gotit = buff.find("#")
                if(gotit != -1):
                    return retVal
        except Exception:
            retVal = retVal + "\n Error-101"
            flag = True
    if(retVal == ""):
        retVal = "\n Error-101"
    return retVal
# EOM
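
# --- Hedged usage sketch (not part of the original module) -------------------
# `obj` only needs settimeout()/send()/recv(), i.e. a paramiko-style
# interactive channel.  The host, credentials and enable secret below are
# placeholders, not values taken from this module:
#
#     import paramiko
#     ssh = paramiko.SSHClient()
#     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#     ssh.connect('192.0.2.10', username='admin', password='secret')
#     chan = ssh.invoke_shell()
#     transcript = enterEnableModeForDevice('enable-secret', 30, chan)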


def waitForDeviceResponse(command, prompt, timeout, obj):
    obj.settimeout(int(timeout))
    obj.send(command)
    flag = False
    retVal = ""
    while not flag:
        time.sleep(1)
        try:
            buffByte = obj.recv(9999)
            buff = buffByte.decode()
            retVal = retVal + buff
            # debugOutput(retVal)
            gotit = buff.find(prompt)
            if(gotit != -1):
                flag = True
        except Exception:
            # debugOutput(prompt)
            if prompt == "(yes/no)?":
                pass
            elif prompt == "Password:":
                pass
            else:
                retVal = retVal + "\n Error-101"
            flag = True
    return retVal
# EOM


def checkOutputForError(output):
    retVal = ""
    index = output.lower().find('error')
    startIndex = index + 6
    if(index == -1):
        index = output.lower().find('invalid')
        startIndex = index + 8
        if(index == -1):
            index = output.lower().find('cannot be enabled in l2 interface')
            startIndex = index + 34
            if(index == -1):
                index = output.lower().find('incorrect')
                startIndex = index + 10
                if(index == -1):
                    index = output.lower().find('failure')
                    startIndex = index + 8
                    if(index == -1):
                        return None

    endIndex = startIndex + 3
    errorCode = output[startIndex:endIndex]
    result = errorCode.isdigit()
    if(result is not True):
        return "Device returned an Error. Please check Results for more \
        information"

    errorFile = "dictionary/ErrorCodes.lvo"
    try:
        # with open(errorFile, 'r') as f:
        f = open(errorFile, 'r')
        for line in f:
            if('=' in line):
                data = line.split('=')
                if(data[0].strip() == errorCode):
                    errorString = data[1].strip()
                    return errorString
    except Exception:
        errorString = cnos_errorcodes.getErrorString(errorCode)
        errorString = errorString.strip()
        return errorString
    return "Error Code Not Found"
# EOM


def checkSanityofVariable(deviceType, variableId, variableValue):
    retVal = ""
    ruleFile = "dictionary/" + deviceType + "_rules.lvo"
    ruleString = getRuleStringForVariable(deviceType, ruleFile, variableId)
    retVal = validateValueAgainstRule(ruleString, variableValue)
    return retVal
# EOM


def getRuleStringForVariable(deviceType, ruleFile, variableId):
    retVal = ""
    try:
        # with open(ruleFile, 'r') as f:
        f = open(ruleFile, 'r')
        for line in f:
            # debugOutput(line)
            if(':' in line):
                data = line.split(':')
                # debugOutput(data[0])
                if(data[0].strip() == variableId):
                    retVal = line
    except Exception:
        ruleString = cnos_devicerules.getRuleString(deviceType, variableId)
        retVal = ruleString.strip()
    return retVal
# EOM


def validateValueAgainstRule(ruleString, variableValue):

    retVal = ""
    if(ruleString == ""):
        return 1
    rules = ruleString.split(':')
    variableType = rules[1].strip()
    varRange = rules[2].strip()
    if(variableType == "INTEGER"):
        result = checkInteger(variableValue)
        if(result is True):
            return "ok"
        else:
            return "Error-111"
    elif(variableType == "FLOAT"):
        result = checkFloat(variableValue)
        if(result is True):
            return "ok"
        else:
            return "Error-112"

    elif(variableType == "INTEGER_VALUE"):
        int_range = varRange.split('-')
        r = range(int(int_range[0].strip()), int(int_range[1].strip()))
        if(checkInteger(variableValue) is not True):
            return "Error-111"
        result = int(variableValue) in r
        if(result is True):
            return "ok"
        else:
            return "Error-113"

    elif(variableType == "INTEGER_VALUE_RANGE"):
        int_range = varRange.split('-')
        varLower = int_range[0].strip()
        varHigher = int_range[1].strip()
        r = range(int(varLower), int(varHigher))
        val_range = variableValue.split('-')
        try:
            valLower = val_range[0].strip()
            valHigher = val_range[1].strip()
        except Exception:
            return "Error-113"
        if((checkInteger(valLower) is not True) or
                (checkInteger(valHigher) is not True)):
            # debugOutput("Error-114")
            return "Error-114"
        result = (int(valLower) in r) and (int(valHigher) in r) \
            and (int(valLower) < int(valHigher))
        if(result is True):
            return "ok"
        else:
            # debugOutput("Error-113")
            return "Error-113"

    elif(variableType == "INTEGER_OPTIONS"):
        int_options = varRange.split(',')
        if(checkInteger(variableValue) is not True):
            return "Error-111"
        result = False
        for opt in int_options:
            # compare values with ==; 'is' only tests object identity
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "LONG"):
        result = checkLong(variableValue)
        if(result is True):
            return "ok"
        else:
            return "Error-116"

    elif(variableType == "LONG_VALUE"):
        long_range = varRange.split('-')
        r = range(int(long_range[0].strip()), int(long_range[1].strip()))
        if(checkLong(variableValue) is not True):
            # debugOutput(variableValue)
            return "Error-116"
        result = int(variableValue) in r
        if(result is True):
            return "ok"
        else:
            return "Error-113"

    elif(variableType == "LONG_VALUE_RANGE"):
        long_range = varRange.split('-')
        r = range(int(long_range[0].strip()), int(long_range[1].strip()))
        val_range = variableValue.split('-')
        if((checkLong(val_range[0]) is not True) or
                (checkLong(val_range[1]) is not True)):
            return "Error-117"
        # compare as integers; the split values are still strings here
        result = (int(val_range[0]) in r) and (
            int(val_range[1]) in r) and (int(val_range[0]) < int(val_range[1]))
        if(result is True):
            return "ok"
        else:
            return "Error-113"
    elif(variableType == "LONG_OPTIONS"):
        long_options = varRange.split(',')
        if(checkLong(variableValue) is not True):
            return "Error-116"
        result = False
        for opt in long_options:
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "TEXT"):
        if(variableValue == ""):
            return "Error-118"
        if(isinstance(variableValue, str)):
            return "ok"
        else:
            return "Error-119"

    elif(variableType == "NO_VALIDATION"):
        if(variableValue == ""):
            return "Error-118"
        else:
            return "ok"

    elif(variableType == "TEXT_OR_EMPTY"):
        if(variableValue is None or variableValue == ""):
            return "ok"
        if(isinstance(variableValue, str)):
            return "ok"
        else:
            return "Error-119"

    elif(variableType == "MATCH_TEXT"):
        if(variableValue == ""):
            return "Error-118"
        if(isinstance(variableValue, str)):
            if(varRange == variableValue):
                return "ok"
            else:
                return "Error-120"
        else:
            return "Error-119"

    elif(variableType == "MATCH_TEXT_OR_EMPTY"):
        if(variableValue is None or variableValue == ""):
            return "ok"
        if(isinstance(variableValue, str)):
            if(varRange == variableValue):
                return "ok"
            else:
                return "Error-120"
        else:
            return "Error-119"

    elif(variableType == "TEXT_OPTIONS"):
        str_options = varRange.split(',')
        if(isinstance(variableValue, str) is not True):
            return "Error-119"
        result = False
        for opt in str_options:
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "TEXT_OPTIONS_OR_EMPTY"):
        if(variableValue is None or variableValue == ""):
            return "ok"
        str_options = varRange.split(',')
        if(isinstance(variableValue, str) is not True):
            return "Error-119"
        result = False
        for opt in str_options:
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "IPV4Address"):
        try:
            socket.inet_pton(socket.AF_INET, variableValue)
            result = True
        except socket.error:
            result = False
        if(result is True):
            return "ok"
        else:
            return "Error-121"
    elif(variableType == "IPV4AddressWithMask"):
        if(variableValue is None or variableValue == ""):
            return "Error-119"
        str_options = variableValue.split('/')
        ipaddr = str_options[0]
        mask = str_options[1]
        try:
            socket.inet_pton(socket.AF_INET, ipaddr)
            if(checkInteger(mask) is True):
                result = True
            else:
                result = False
        except socket.error:
            result = False
        if(result is True):
            return "ok"
        else:
            return "Error-121"

    elif(variableType == "IPV6Address"):
        try:
            socket.inet_pton(socket.AF_INET6, variableValue)
            result = True
        except socket.error:
            result = False
        if(result is True):
            return "ok"
        else:
            return "Error-122"

    return retVal
# EOM
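
# --- Hedged examples (not part of the original module) -----------------------
# A rule string has the shape '<variableId>:<TYPE>:<range>', normally looked up
# from the dictionary/*_rules.lvo files or cnos_devicerules.  The rule ids
# below are made up purely for illustration:
#
#     validateValueAgainstRule('vlan_id:INTEGER_VALUE:1-3999', '13')
#     # -> "ok"
#     validateValueAgainstRule('vlan_id:INTEGER_VALUE:1-3999', '4001')
#     # -> "Error-113" (value outside the allowed range)
#     validateValueAgainstRule('vlan_state:TEXT_OPTIONS:active,suspend', 'down')
#     # -> "Error-115" (value not among the listed options)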


def disablePaging(remote_conn):
    remote_conn.send("terminal length 0\n")
    time.sleep(1)
    # Clear the buffer on the screen
    outputByte = remote_conn.recv(1000)
    output = outputByte.decode()
    return output
# EOM


def checkInteger(s):
    try:
        int(s)
        return True
    except ValueError:
        return False
# EOM


def checkFloat(s):
    try:
        float(s)
        return True
    except ValueError:
        return False
# EOM


def checkLong(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


def debugOutput(command):
    f = open('debugOutput.txt', 'a')
    f.write(str(command))  # python will convert \n to os.linesep
    f.close()  # you can omit in most cases as the destructor will call it
# EOM
File diff suppressed because it is too large
@@ -1,256 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (C) 2017 Lenovo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains error codes and methods
# Lenovo Networking

errorDict = {0: 'Success',
             1: 'NOK',
             101: 'Device Response Timed out',
             102: 'Command Not supported - Use CLI command',
             103: 'Invalid Context',
             104: 'Command Value Not Supported as of Now. Use vlan Id only',
             105: 'Invalid interface Range',
             106: 'Please provide Enable Password.',
             108: '',
             109: '',
             110: 'Invalid protocol option',
             111: 'The Value is not Integer',
             112: 'The Value is not Float',
             113: 'Value is not in Range',
             114: 'Range value is not Integer',
             115: 'Value is not in Options',
             116: 'The Value is not Long',
             117: 'Range value is not Long',
             118: 'The Value cannot be empty',
             119: 'The Value is not String',
             120: 'The Value is not Matching',
             121: 'The Value is not IPV4 Address',
             122: 'The Value is not IPV6 Address',
             123: '',
             124: '',
             125: '',
             126: '',
             127: '',
             128: '',
             129: '',
             130: 'Invalid Access Map Name',
             131: 'Invalid Vlan Dot1q Tag',
             132: 'Invalid Vlan filter value',
             133: 'Invalid Vlan Range Value',
             134: 'Invalid Vlan Id',
             135: 'Invalid Vlan Access Map Action',
             136: 'Invalid Vlan Access Map Name',
             137: 'Invalid Access List',
             138: 'Invalid Vlan Access Map parameter',
             139: 'Invalid Vlan Name',
             140: 'Invalid Vlan Flood value,',
             141: 'Invalid Vlan State Value',
             142: 'Invalid Vlan Last Member query Interval',
             143: 'Invalid Querier IP address',
             144: 'Invalid Querier Time out',
             145: 'Invalid Query Interval',
             146: 'Invalid Vlan query max response time',
             147: 'Invalid vlan robustness variable',
             148: 'Invalid Vlan Startup Query count',
             149: 'Invalid vlan Startup Query Interval',
             150: 'Invalid Vlan snooping version',
             151: 'Invalid Vlan Ethernet Interface',
             152: 'Invalid Vlan Port Tag Number',
             153: 'Invalid mrouter option',
             154: 'Invalid Vlan Option',
             155: '',
             156: '',
             157: '',
             158: '',
             159: '',
             160: 'Invalid Vlag Auto Recovery Value',
             161: 'Invalid Vlag Config Consistency Value',
             162: 'Invalid Vlag Port Aggregation Number',
             163: 'Invalid Vlag Priority Value',
             164: 'Invalid Vlag Startup delay value',
             165: 'Invalid Vlag Trie Id',
             166: 'Invalid Vlag Instance Option',
             167: 'Invalid Vlag Keep Alive Attempts',
             168: 'Invalid Vlag Keep Alive Interval',
             169: 'Invalid Vlag Retry Interval',
             170: 'Invalid Vlag Peer Ip VRF Value',
             171: 'Invalid Vlag Health Check Options',
             172: 'Invalid Vlag Option',
             173: '',
             174: '',
             175: '',
             176: 'Invalid BGP As Number',
             177: 'Invalid Routing protocol option',
             178: 'Invalid BGP Address Family',
             179: 'Invalid AS Path options',
             180: 'Invalid BGP med options',
             181: 'Invalid Best Path option',
             182: 'Invalid BGP Local count number',
             183: 'Cluster Id has to either IP or AS Number',
             184: 'Invalid confederation identifier',
             185: 'Invalid Confederation Peer AS Value',
             186: 'Invalid Confederation Option',
             187: 'Invalid state path relay value',
             188: 'Invalid Maxas Limit AS Value',
             189: 'Invalid Neighbor IP Address or Neighbor AS Number',
             190: 'Invalid Router Id',
             191: 'Invalid BGP Keep Alive Interval',
             192: 'Invalid BGP Hold time',
             193: 'Invalid BGP Option',
             194: 'Invalid BGP Address Family option',
             195: 'Invalid BGP Address Family Redistribution option. ',
             196: 'Invalid BGP Address Family Route Map Name',
             197: 'Invalid Next Hop Critical Delay',
             198: 'Invalid Next Hop Non Critical Delay',
             199: 'Invalid Multipath Number Value',
             200: 'Invalid Aggegation Group Mode',
             201: 'Invalid Aggregation Group No',
             202: 'Invalid BFD Access Vlan',
             203: 'Invalid CFD Bridgeport Mode',
             204: 'Invalid Trunk Option',
             205: 'Invalid BFD Option',
             206: 'Invalid Portchannel description',
             207: 'Invalid Portchannel duplex option',
             208: 'Invalid Flow control option state',
             209: 'Invalid Flow control option',
             210: 'Invalid LACP Port priority',
             211: 'Invalid LACP Time out options',
             212: 'Invalid LACP Command options',
             213: 'Invalid LLDP TLV Option',
             214: 'Invalid LLDP Option',
             215: 'Invalid Load interval delay',
             216: 'Invalid Load interval Counter Number',
             217: 'Invalid Load Interval option',
             218: 'Invalid Mac Access Group Name',
             219: 'Invalid Mac Address',
             220: 'Invalid Microburst threshold value',
             221: 'Invalid MTU Value',
             222: 'Invalid Service instance value',
             223: 'Invalid service policy name',
             224: 'Invalid service policy options',
             225: 'Invalid Interface speed value',
             226: 'Invalid Storm control level value',
             227: 'Invalid Storm control option',
             228: 'Invalid Portchannel dot1q tag',
             229: 'Invalid VRRP Id Value',
             230: 'Invalid VRRP Options',
             231: 'Invalid portchannel source interface option',
             232: 'Invalid portchannel load balance options',
             233: 'Invalid Portchannel configuration attribute',
             234: 'Invalid BFD Interval Value',
             235: 'Invalid BFD minrx Value',
             236: 'Invalid BFD multiplier Value',
             237: 'Invalid Key Chain Value',
             238: 'Invalid key name option',
             239: 'Invalid key id value',
             240: 'Invalid Key Option',
             241: 'Invalid authentication option',
             242: 'Invalid destination Ip',
             243: 'Invalid source Ip',
             244: 'Invalid IP Option',
             245: 'Invalid Access group option',
             246: 'Invalid Access group name',
             247: 'Invalid ARP MacAddress Value',
             248: 'Invalid ARP timeout value',
             249: 'Invalid ARP Option',
             250: 'Invalid dhcp request option',
             251: 'Invalid dhcp Client option',
             252: 'Invalid relay Ip Address',
             253: 'Invalid dhcp Option',
             254: 'Invalid OSPF Option',
             255: 'Invalid OSPF Id IP Address Value',
             256: 'Invalid Ip Router Option',
             257: 'Invalid Spanning tree bpdufilter Options',
             258: 'Invalid Spanning tree bpduguard Options',
             259: 'Invalid Spanning tree cost Options',
             260: 'Invalid Spanning tree guard Options',
             261: 'Invalid Spanning tree link-type Options',
             262: 'Invalid Spanning tree link-type Options',
             263: 'Invalid Spanning tree options',
             264: 'Port-priority in increments of 32 is required',
             265: 'Invalid Spanning tree vlan options',
             266: 'Invalid IPv6 option',
             267: 'Invalid IPV6 neighbor IP Address',
             268: 'Invalid IPV6 neighbor mac address',
             269: 'Invalid IPV6 dhcp option',
             270: 'Invalid IPV6 relay address option',
             271: 'Invalid IPV6 Ethernet option',
             272: 'Invalid IPV6 Vlan option',
             273: 'Invalid IPV6 Link Local option',
             274: 'Invalid IPV6 dhcp option',
             275: 'Invalid IPV6 Address',
             276: 'Invalid IPV6 Address option',
             277: 'Invalid BFD neighbor options',
             278: 'Invalid Secondary option',
             289: 'Invalid PortChannel IPV4 address',
             290: 'Invalid Max Path Options',
             291: 'Invalid Distance Local Route value',
             292: 'Invalid Distance Internal AS value',
             293: 'Invalid Distance External AS value',
             294: 'Invalid BGP Reachability Half Life',
             295: 'Invalid BGP Dampening parameter',
             296: 'Invalid BGP Aggregate Prefix value',
             297: 'Invalid BGP Aggregate Prefix Option',
             298: 'Invalid BGP Address Family Route Map Name',
             299: 'Invalid BGP Net IP Mask Value',
             300: 'Invalid BGP Net IP Prefix Value',
             301: 'Invalid BGP Neighbor configuration option',
             302: 'Invalid BGP Neighbor Weight Value',
             303: 'Invalid Neigbor update source option',
             304: 'Invalid Ethernet slot/chassis number',
             305: 'Invalid Loopback Interface number',
             306: 'Invalid vlan id',
             307: 'Invalid Number of hops',
             308: 'Invalid Neighbor Keepalive interval',
             309: 'Invalid Neighbor timer hold time',
             310: 'Invalid neighbor password ',
             311: 'Invalid Max peer limit',
             312: 'Invalid Local AS Number',
             313: 'Invalid maximum hop count',
             314: 'Invalid neighbor description',
             315: 'Invalid Neighbor connect timer value',
             316: 'Invalid Neighbor address family option',
             317: 'Invalid neighbor address family option',
             318: 'Invalid route-map name',
             319: 'Invalid route-map',
             320: 'Invalid Name of a prefix list',
             321: 'Invalid Filter incoming option',
             322: 'Invalid AS path access-list name',
             323: 'Invalid Filter route option',
             324: 'Invalid route-map name',
             325: 'Invalid Number of occurrences of AS number',
             326: 'Invalid Prefix Limit'}


def getErrorString(errorCode):
    retVal = errorDict[int(errorCode)]
    return retVal
# EOM
@@ -1,132 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json

from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection, ConnectionError

_DEVICE_CONFIGS = None


def get_connection(module):
    if hasattr(module, '_edgeos_connection'):
        return module._edgeos_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._edgeos_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._edgeos_connection


def get_capabilities(module):
    if hasattr(module, '_edgeos_capabilities'):
        return module._edgeos_capabilities

    capabilities = Connection(module._socket_path).get_capabilities()
    module._edgeos_capabilities = json.loads(capabilities)
    return module._edgeos_capabilities


def get_config(module):
    global _DEVICE_CONFIGS

    if _DEVICE_CONFIGS is not None:
        return _DEVICE_CONFIGS
    else:
        connection = get_connection(module)
        out = connection.get_config()
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS = cfg
        return cfg


def run_commands(module, commands, check_rc=True):
    responses = list()
    connection = get_connection(module)

    for cmd in to_list(commands):
        if isinstance(cmd, dict):
            command = cmd['command']
            prompt = cmd['prompt']
            answer = cmd['answer']
        else:
            command = cmd
            prompt = None
            answer = None

        try:
            out = connection.get(command, prompt, answer)
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc))

        try:
            out = to_text(out, errors='surrogate_or_strict')
        except UnicodeError:
            module.fail_json(msg=u'Failed to decode output from %s: %s' %
                             (cmd, to_text(out)))

        responses.append(out)

    return responses


def load_config(module, commands, commit=False, comment=None):
    connection = get_connection(module)

    try:
        out = connection.edit_config(commands)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))

    diff = None
    if module._diff:
        out = connection.get('compare')
        out = to_text(out, errors='surrogate_or_strict')

        if not out.startswith('No changes'):
            out = connection.get('show')
            diff = to_text(out, errors='surrogate_or_strict').strip()

    if commit:
        try:
            out = connection.commit(comment)
        except ConnectionError:
            connection.discard_changes()
            module.fail_json(msg='commit failed: %s' % out)

    if not commit:
        connection.discard_changes()
    else:
        connection.get('exit')

    if diff:
        return diff
@@ -1,168 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import re

from copy import deepcopy

from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec

_DEVICE_CONFIGS = {}


def build_aggregate_spec(element_spec, required, *extra_spec):
    aggregate_spec = deepcopy(element_spec)
    for elt in required:
        aggregate_spec[elt] = dict(required=True)
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec)
    )
    argument_spec.update(element_spec)
    for elt in extra_spec:
        argument_spec.update(elt)
    return argument_spec


def map_params_to_obj(module):
    obj = []
    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]

            d = item.copy()
            obj.append(d)
    else:
        obj.append(module.params)

    return obj


def get_connection(module):
    if hasattr(module, '_edgeswitch_connection'):
        return module._edgeswitch_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._edgeswitch_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._edgeswitch_connection


def get_capabilities(module):
    if hasattr(module, '_edgeswitch_capabilities'):
        return module._edgeswitch_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module._edgeswitch_capabilities = json.loads(capabilities)
    return module._edgeswitch_capabilities


def get_defaults_flag(module):
    connection = get_connection(module)
    try:
        out = connection.get_defaults_flag()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    return to_text(out, errors='surrogate_then_replace').strip()


def get_config(module, flags=None):
    flag_str = ' '.join(to_list(flags))

    try:
        return _DEVICE_CONFIGS[flag_str]
    except KeyError:
        connection = get_connection(module)
        try:
            out = connection.get_config(flags=flags)
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[flag_str] = cfg
        return cfg


def get_interfaces_config(module):
    config = get_config(module)
    lines = config.split('\n')
    interfaces = {}
    interface = None
    for line in lines:
        if line == 'exit':
            if interface:
                interfaces[interface[0]] = interface
                interface = None
        elif interface:
            interface.append(line)
        else:
            match = re.match(r'^interface (.*)$', line)
            if match:
                interface = list()
                interface.append(line)

    return interfaces
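
# --- Illustrative note (not part of the original module) ---------------------
# get_interfaces_config() keys the result by the full 'interface <id>' line and
# stores that interface's configuration lines (including the first line) as the
# value.  With made-up device output it returns something like:
#
#     {
#         'interface 0/1': ['interface 0/1', 'vlan pvid 10'],
#         'interface 0/2': ['interface 0/2', 'shutdown'],
#     }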


def to_commands(module, commands):
    spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    transform = ComplexList(spec, module)
    return transform(commands)


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)
    try:
        return connection.run_commands(commands=commands, check_rc=check_rc)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def load_config(module, commands):
    connection = get_connection(module)

    try:
        resp = connection.edit_config(commands)
        return resp.get('response')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
@@ -1,91 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import re


class InterfaceConfiguration:
    def __init__(self):
        self.commands = []
        self.merged = False

    def has_same_commands(self, interface):
        len1 = len(self.commands)
        len2 = len(interface.commands)
        return len1 == len2 and len1 == len(frozenset(self.commands).intersection(interface.commands))


def merge_interfaces(interfaces):
    """ Reduce the number of commands generated by an edgeswitch module:
    take the interfaces one by one and try to merge each with its neighbors
    when they all have the same commands to run.
    """
    merged = {}

    for i, interface in interfaces.items():
        if interface.merged:
            continue
        interface.merged = True

        match = re.match(r'(\d+)\/(\d+)', i)
        group = int(match.group(1))
        start = int(match.group(2))
        end = start

        while True:
            try:
                start = start - 1
                key = '{0}/{1}'.format(group, start)
                neighbor = interfaces[key]
                if not neighbor.merged and interface.has_same_commands(neighbor):
                    neighbor.merged = True
                else:
                    break
            except KeyError:
                break
        start = start + 1

        while True:
            try:
                end = end + 1
                key = '{0}/{1}'.format(group, end)
                neighbor = interfaces[key]
                if not neighbor.merged and interface.has_same_commands(neighbor):
                    neighbor.merged = True
                else:
                    break
            except KeyError:
                break
        end = end - 1

        if end == start:
            key = '{0}/{1}'.format(group, start)
        else:
            key = '{0}/{1}-{2}/{3}'.format(group, start, group, end)

        merged[key] = interface
    return merged
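
# --- Hedged usage sketch (not part of the original file) ---------------------
# Consecutive ports of the same group that carry identical commands are folded
# into a single '<group>/<start>-<group>/<end>' key.  A self-contained example
# with made-up commands:
#
#     ports = {}
#     for port in ('0/1', '0/2', '0/3'):
#         cfg = InterfaceConfiguration()
#         cfg.commands = ['vlan pvid 10']
#         ports[port] = cfg
#     merged = merge_interfaces(ports)
#     # list(merged) == ['0/1-0/3']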
@@ -1,172 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (C) 2017 Lenovo.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains utility methods
# Lenovo Networking

from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection
from ansible.module_utils.connection import Connection, exec_command
from ansible.module_utils.connection import ConnectionError

_DEVICE_CONFIGS = {}
_CONNECTION = None

enos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
    'timeout': dict(type='int'),
    'context': dict(),
    'passwords': dict()
}

enos_argument_spec = {
    'provider': dict(type='dict', options=enos_provider_spec),
}

command_spec = {
    'command': dict(key=True),
    'prompt': dict(),
    'answer': dict()
}


def get_provider_argspec():
    return enos_provider_spec


def check_args(module, warnings):
    pass


def get_connection(module):
    global _CONNECTION
    if _CONNECTION:
        return _CONNECTION
    _CONNECTION = Connection(module._socket_path)

    context = None
    try:
        context = module.params['context']
    except KeyError:
        context = None

    if context:
        if context == 'system':
            command = 'changeto system'
        else:
            command = 'changeto context %s' % context
        _CONNECTION.get(command)

    return _CONNECTION


def get_config(module, flags=None):
    flags = [] if flags is None else flags

    passwords = None
    try:
        passwords = module.params['passwords']
    except KeyError:
        passwords = None
    if passwords:
        cmd = 'more system:running-config'
    else:
        cmd = 'show running-config '
        cmd += ' '.join(flags)
        cmd = cmd.strip()

    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        conn = get_connection(module)
        out = conn.get(cmd)
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg


def to_commands(module, commands):
    if not isinstance(commands, list):
        raise AssertionError('argument must be of type <list>')

    transform = EntityCollection(module, command_spec)
    commands = transform(commands)

    for index, item in enumerate(commands):
        if module.check_mode and not item['command'].startswith('show'):
            module.warn('only show commands are supported when using check '
                        'mode, not executing `%s`' % item['command'])

    return commands


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)

    commands = to_commands(module, to_list(commands))

    responses = list()

    for cmd in commands:
        out = connection.get(**cmd)
        responses.append(to_text(out, errors='surrogate_then_replace'))

    return responses


def load_config(module, config):
    try:
        conn = get_connection(module)
        conn.get('enable')
        conn.edit_config(config)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def get_defaults_flag(module):
    rc, out, err = exec_command(module, 'show running-config ?')
    out = to_text(out, errors='surrogate_then_replace')

    commands = set()
    for line in out.splitlines():
        if line:
            commands.add(line.strip().split()[0])

    if 'all' in commands:
        return 'all'
    else:
        return 'full'
@@ -1,49 +0,0 @@
#
# Copyright (c) 2019 Ericsson AB.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError

_DEVICE_CONFIGS = {}


def get_connection(module):
    if hasattr(module, '_eric_eccli_connection'):
        return module._eric_eccli_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._eric_eccli_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._eric_eccli_connection


def get_capabilities(module):
    if hasattr(module, '_eric_eccli_capabilities'):
        return module._eric_eccli_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module._eric_eccli_capabilities = json.loads(capabilities)
    return module._eric_eccli_capabilities


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)
    try:
        return connection.run_commands(commands=commands, check_rc=check_rc)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
@@ -1,23 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the exos facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class FactsArgs(object):  # pylint: disable=R0903
    """ The arg spec for the exos facts module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'gather_subset': dict(default=['!config'], type='list'),
        'gather_network_resources': dict(type='list'),
    }
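
# --- Hedged usage sketch (not part of the original file) ---------------------
# These *Args classes only carry `argument_spec`; a facts module would
# typically hand it to AnsibleModule, roughly:
#
#     from ansible.module_utils.basic import AnsibleModule
#     module = AnsibleModule(argument_spec=FactsArgs.argument_spec,
#                            supports_check_mode=True)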
@@ -1,48 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

#############################################
#                WARNING                    #
#############################################
#
# This file is auto generated by the resource
#   module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
#   by the resource module builder.
#
# Changes should be made in the model used to
#   generate this file or in the resource module
#   builder template.
#
#############################################
"""
The arg spec for the exos_l2_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class L2_interfacesArgs(object):  # pylint: disable=R0903
    """The arg spec for the exos_l2_interfaces module
    """
    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'access': {'options': {'vlan': {'type': 'int'}},
                           'type': 'dict'},
                'name': {'required': True, 'type': 'str'},
                'trunk': {'options': {'native_vlan': {'type': 'int'}, 'trunk_allowed_vlans': {'type': 'list'}},
                          'type': 'dict'}},
            'type': 'list'},
        'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'], 'default': 'merged', 'type': 'str'}
    }  # pylint: disable=C0301
@@ -1,57 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

#############################################
#                WARNING                    #
#############################################
#
# This file is auto generated by the resource
#   module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
#   by the resource module builder.
#
# Changes should be made in the model used to
#   generate this file or in the resource module
#   builder template.
#
#############################################

"""
The arg spec for the exos_lldp_global module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Lldp_globalArgs(object):  # pylint: disable=R0903
    """The arg spec for the exos_lldp_global module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'options': {
                'interval': {'default': 30, 'type': 'int'},
                'tlv_select': {
                    'options': {
                        'management_address': {'type': 'bool'},
                        'port_description': {'type': 'bool'},
                        'system_capabilities': {'type': 'bool'},
                        'system_description': {
                            'default': True,
                            'type': 'bool'},
                        'system_name': {'default': True, 'type': 'bool'}},
                    'type': 'dict'}},
            'type': 'dict'},
        'state': {
            'choices': ['merged', 'replaced', 'deleted'],
            'default': 'merged',
            'type': 'str'}}  # pylint: disable=C0301
@@ -1,49 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

#############################################
#                WARNING                    #
#############################################
#
# This file is auto generated by the resource
#   module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
#   by the resource module builder.
#
# Changes should be made in the model used to
#   generate this file or in the resource module
#   builder template.
#
#############################################

"""
The arg spec for the exos_lldp_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Lldp_interfacesArgs(object):  # pylint: disable=R0903
    """The arg spec for the exos_lldp_interfaces module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'enabled': {'type': 'bool'},
                'name': {'required': True, 'type': 'str'}},
            'type': 'list'},
        'state': {
            'choices': ['merged', 'replaced', 'overridden', 'deleted'],
            'default': 'merged',
            'type': 'str'}}  # pylint: disable=C0301
Some files were not shown because too many files have changed in this diff